Merge tag 'drm-intel-next-2017-01-23' of git://anongit.freedesktop.org/git/drm-intel into drm-next

Final block of feature work for 4.11:

- gen8 pd cleanup from Matthew Auld
- more cleanups for view/vma (Chris)
- dmc support on glk (Anusha Srivatsa)
- use core crc api (Tomeu)
- track wedged requests using fence.error (Chris)
- lots of psr fixes (Nagaraju, Vathsala)
- dp mst support, acked for merging through drm-intel by Takashi
  (Libin)
- huc loading support, including uapi for libva to use it (Anusha
  Srivatsa); a userspace sketch of that uapi follows below
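
As a rough userspace sketch of the new uapi (not part of this pull; the helper name is hypothetical, but I915_PARAM_HUC_STATUS and struct drm_i915_getparam are the getparam interface shown in the diff below, and drmIoctl comes from libdrm):

#include <errno.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Hypothetical helper: returns nonzero if the HuC firmware was loaded and
 * authenticated (HUC_STATUS2 & HUC_FW_VERIFIED), 0 if not, or a negative
 * errno if the kernel does not recognise the parameter.
 */
static int huc_fw_verified(int drm_fd)
{
	int value = 0;
	struct drm_i915_getparam gp;

	memset(&gp, 0, sizeof(gp));
	gp.param = I915_PARAM_HUC_STATUS;
	gp.value = &value;

	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -errno;

	return value != 0;
}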

* tag 'drm-intel-next-2017-01-23' of git://anongit.freedesktop.org/git/drm-intel: (111 commits)
  drm/i915: Update DRIVER_DATE to 20170123
  drm/i915: reinstate call to trace_i915_vma_bind
  drm/i915: Assert that created vma has a whole number of pages
  drm/i915: Assert the drm_mm_node is allocated when on the VM lists
  drm/i915: Treat an error from i915_vma_instance() as unlikely
  drm/i915: Reject vma creation larger than address space
  drm/i915: Use common LRU inactive vma bumping for unpin_from_display
  drm/i915: Do an unlocked wait before set-cache-level ioctl
  drm/i915/huc: Assert that HuC vma is placed in GuC accessible range
  drm/i915/huc: Avoid attempting to authenticate non-existent fw
  drm/i915: Set adjustment to zero on Up/Down interrupts if freq is already max/min
drm/i915: Remove the double handling of 'flags' from intel_mode_from_pipe_config()
  drm/i915: Remove crtc->config usage from intel_modeset_readout_hw_state()
  drm/i915: Release temporary load-detect state upon switching
  drm/i915: Remove i915_gem_object_to_ggtt()
  drm/i915: Remove i915_vma_create from VMA API
  drm/i915: Add a check that the VMA instance we lookup matches the request
  drm/i915: Rename some warts in the VMA API
  drm/i915: Track pinned vma in intel_plane_state
  drm/i915/get_params: Add HuC status to getparams
  ...
Committed by Dave Airlie, 2017-01-27 12:08:32 +10:00
58 changed files with 3257 additions and 1891 deletions

View File

@@ -56,7 +56,9 @@ i915-y += i915_cmd_parser.o \
# general-purpose microcontroller (GuC) support
i915-y += intel_uc.o \
intel_guc_log.o \
intel_guc_loader.o \
intel_huc.o \
i915_guc_submission.o
# autogenerated null render state

View File

@@ -41,47 +41,34 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
u32 alloc_flag, search_flag;
unsigned int flags;
u64 start, end, size;
struct drm_mm_node *node;
int retried = 0;
int ret;
if (high_gm) {
search_flag = DRM_MM_SEARCH_BELOW;
alloc_flag = DRM_MM_CREATE_TOP;
node = &vgpu->gm.high_gm_node;
size = vgpu_hidden_sz(vgpu);
start = gvt_hidden_gmadr_base(gvt);
end = gvt_hidden_gmadr_end(gvt);
flags = PIN_HIGH;
} else {
search_flag = DRM_MM_SEARCH_DEFAULT;
alloc_flag = DRM_MM_CREATE_DEFAULT;
node = &vgpu->gm.low_gm_node;
size = vgpu_aperture_sz(vgpu);
start = gvt_aperture_gmadr_base(gvt);
end = gvt_aperture_gmadr_end(gvt);
flags = PIN_MAPPABLE;
}
mutex_lock(&dev_priv->drm.struct_mutex);
search_again:
ret = drm_mm_insert_node_in_range_generic(&dev_priv->ggtt.base.mm,
node, size, 4096,
I915_COLOR_UNEVICTABLE,
start, end, search_flag,
alloc_flag);
if (ret) {
ret = i915_gem_evict_something(&dev_priv->ggtt.base,
size, 4096,
I915_COLOR_UNEVICTABLE,
start, end, 0);
if (ret == 0 && ++retried < 3)
goto search_again;
gvt_err("fail to alloc %s gm space from host, retried %d\n",
high_gm ? "high" : "low", retried);
}
ret = i915_gem_gtt_insert(&dev_priv->ggtt.base, node,
size, 4096, I915_COLOR_UNEVICTABLE,
start, end, flags);
mutex_unlock(&dev_priv->drm.struct_mutex);
if (ret)
gvt_err("fail to alloc %s gm space from host\n",
high_gm ? "high" : "low");
return ret;
}

View File

@@ -159,8 +159,35 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
i915_vma_is_ggtt(vma) ? "g" : "pp",
vma->node.start, vma->node.size);
if (i915_vma_is_ggtt(vma))
seq_printf(m, ", type: %u", vma->ggtt_view.type);
if (i915_vma_is_ggtt(vma)) {
switch (vma->ggtt_view.type) {
case I915_GGTT_VIEW_NORMAL:
seq_puts(m, ", normal");
break;
case I915_GGTT_VIEW_PARTIAL:
seq_printf(m, ", partial [%08llx+%x]",
vma->ggtt_view.partial.offset << PAGE_SHIFT,
vma->ggtt_view.partial.size << PAGE_SHIFT);
break;
case I915_GGTT_VIEW_ROTATED:
seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
vma->ggtt_view.rotated.plane[0].width,
vma->ggtt_view.rotated.plane[0].height,
vma->ggtt_view.rotated.plane[0].stride,
vma->ggtt_view.rotated.plane[0].offset,
vma->ggtt_view.rotated.plane[1].width,
vma->ggtt_view.rotated.plane[1].height,
vma->ggtt_view.rotated.plane[1].stride,
vma->ggtt_view.rotated.plane[1].offset);
break;
default:
MISSING_CASE(vma->ggtt_view.type);
break;
}
}
if (vma->fence)
seq_printf(m, " , fence: %d%s",
vma->fence->id,
@@ -2325,10 +2352,40 @@ static int i915_llc(struct seq_file *m, void *data)
return 0;
}
static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
if (!HAS_HUC_UCODE(dev_priv))
return 0;
seq_puts(m, "HuC firmware status:\n");
seq_printf(m, "\tpath: %s\n", huc_fw->path);
seq_printf(m, "\tfetch: %s\n",
intel_uc_fw_status_repr(huc_fw->fetch_status));
seq_printf(m, "\tload: %s\n",
intel_uc_fw_status_repr(huc_fw->load_status));
seq_printf(m, "\tversion wanted: %d.%d\n",
huc_fw->major_ver_wanted, huc_fw->minor_ver_wanted);
seq_printf(m, "\tversion found: %d.%d\n",
huc_fw->major_ver_found, huc_fw->minor_ver_found);
seq_printf(m, "\theader: offset is %d; size = %d\n",
huc_fw->header_offset, huc_fw->header_size);
seq_printf(m, "\tuCode: offset is %d; size = %d\n",
huc_fw->ucode_offset, huc_fw->ucode_size);
seq_printf(m, "\tRSA: offset is %d; size = %d\n",
huc_fw->rsa_offset, huc_fw->rsa_size);
seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
return 0;
}
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
u32 tmp, i;
if (!HAS_GUC_UCODE(dev_priv))
@@ -2336,15 +2393,15 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
seq_printf(m, "GuC firmware status:\n");
seq_printf(m, "\tpath: %s\n",
guc_fw->guc_fw_path);
guc_fw->path);
seq_printf(m, "\tfetch: %s\n",
intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
intel_uc_fw_status_repr(guc_fw->fetch_status));
seq_printf(m, "\tload: %s\n",
intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
intel_uc_fw_status_repr(guc_fw->load_status));
seq_printf(m, "\tversion wanted: %d.%d\n",
guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
guc_fw->major_ver_wanted, guc_fw->minor_ver_wanted);
seq_printf(m, "\tversion found: %d.%d\n",
guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found);
guc_fw->major_ver_found, guc_fw->minor_ver_found);
seq_printf(m, "\theader: offset is %d; size = %d\n",
guc_fw->header_offset, guc_fw->header_size);
seq_printf(m, "\tuCode: offset is %d; size = %d\n",
@@ -2532,6 +2589,29 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
i915_guc_log_control_get, i915_guc_log_control_set,
"%lld\n");
static const char *psr2_live_status(u32 val)
{
static const char * const live_status[] = {
"IDLE",
"CAPTURE",
"CAPTURE_FS",
"SLEEP",
"BUFON_FW",
"ML_UP",
"SU_STANDBY",
"FAST_SLEEP",
"DEEP_SLEEP",
"BUF_ON",
"TG_ON"
};
val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT;
if (val < ARRAY_SIZE(live_status))
return live_status[val];
return "unknown";
}
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -2606,6 +2686,12 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "Performance_Counter: %u\n", psrperf);
}
if (dev_priv->psr.psr2_support) {
u32 psr2 = I915_READ(EDP_PSR2_STATUS_CTL);
seq_printf(m, "EDP_PSR2_STATUS_CTL: %x [%s]\n",
psr2, psr2_live_status(psr2));
}
mutex_unlock(&dev_priv->psr.lock);
intel_runtime_pm_put(dev_priv);
@@ -4553,6 +4639,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_guc_info", i915_guc_info, 0},
{"i915_guc_load_status", i915_guc_load_status_info, 0},
{"i915_guc_log_dump", i915_guc_log_dump, 0},
{"i915_huc_load_status", i915_huc_load_status_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
{"i915_hangcheck_info", i915_hangcheck_info, 0},
{"i915_drpc_info", i915_drpc_info, 0},

View File

@@ -49,6 +49,7 @@
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
#include "intel_uc.h"
static struct drm_driver driver;
@@ -315,6 +316,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_MIN_EU_IN_POOL:
value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
break;
case I915_PARAM_HUC_STATUS:
/* The register is already force-woken. We don't need
* any rpm here
*/
value = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
break;
case I915_PARAM_MMAP_GTT_VERSION:
/* Though we've started our numbering from 1, and so class all
* earlier versions as 0, in effect their value is undefined as
@@ -599,6 +606,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_irq;
intel_huc_init(dev_priv);
intel_guc_init(dev_priv);
ret = i915_gem_init(dev_priv);
@@ -627,6 +635,7 @@ cleanup_gem:
i915_gem_fini(dev_priv);
cleanup_irq:
intel_guc_fini(dev_priv);
intel_huc_fini(dev_priv);
drm_irq_uninstall(dev);
intel_teardown_gmbus(dev_priv);
cleanup_csr:
@@ -1114,7 +1123,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
/* Reveal our presence to userspace */
if (drm_dev_register(dev, 0) == 0) {
i915_debugfs_register(dev_priv);
i915_guc_register(dev_priv);
i915_guc_log_register(dev_priv);
i915_setup_sysfs(dev_priv);
/* Depends on sysfs having been initialized */
@@ -1158,7 +1167,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
i915_perf_unregister(dev_priv);
i915_teardown_sysfs(dev_priv);
i915_guc_unregister(dev_priv);
i915_guc_log_unregister(dev_priv);
i915_debugfs_unregister(dev_priv);
drm_dev_unregister(&dev_priv->drm);
@@ -1314,6 +1323,7 @@ void i915_driver_unload(struct drm_device *dev)
drain_workqueue(dev_priv->wq);
intel_guc_fini(dev_priv);
intel_huc_fini(dev_priv);
i915_gem_fini(dev_priv);
intel_fbc_cleanup_cfb(dev_priv);
@@ -1471,7 +1481,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
intel_display_set_init_power(dev_priv, false);
fw_csr = !IS_BROXTON(dev_priv) &&
fw_csr = !IS_GEN9_LP(dev_priv) &&
suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
/*
* In case of firmware assisted context save/restore don't manually
@@ -1484,7 +1494,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
intel_power_domains_suspend(dev_priv);
ret = 0;
if (IS_BROXTON(dev_priv))
if (IS_GEN9_LP(dev_priv))
bxt_enable_dc9(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_enable_pc8(dev_priv);
@@ -1692,7 +1702,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_uncore_early_sanitize(dev_priv, true);
if (IS_BROXTON(dev_priv)) {
if (IS_GEN9_LP(dev_priv)) {
if (!dev_priv->suspended_to_idle)
gen9_sanitize_dc_state(dev_priv);
bxt_disable_dc9(dev_priv);
@@ -1702,7 +1712,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_uncore_sanitize(dev_priv);
if (IS_BROXTON(dev_priv) ||
if (IS_GEN9_LP(dev_priv) ||
!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
intel_power_domains_init_hw(dev_priv, true);
@@ -1728,25 +1738,9 @@ static int i915_resume_switcheroo(struct drm_device *dev)
return i915_drm_resume(dev);
}
static void disable_engines_irq(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
/* Ensure the irq handler finishes, and is not run again. */
disable_irq(dev_priv->drm.irq);
for_each_engine(engine, dev_priv, id)
tasklet_kill(&engine->irq_tasklet);
}
static void enable_engines_irq(struct drm_i915_private *dev_priv)
{
enable_irq(dev_priv->drm.irq);
}
/**
* i915_reset - reset chip after a hang
* @dev: drm device to reset
* @dev_priv: device private to reset
*
* Reset the chip. Useful if a hang is detected. Marks the device as wedged
* on failure.
@@ -1776,12 +1770,15 @@ void i915_reset(struct drm_i915_private *dev_priv)
error->reset_count++;
pr_notice("drm/i915: Resetting chip after gpu hang\n");
i915_gem_reset_prepare(dev_priv);
disable_irq(dev_priv->drm.irq);
ret = i915_gem_reset_prepare(dev_priv);
if (ret) {
DRM_ERROR("GPU recovery failed\n");
intel_gpu_reset(dev_priv, ALL_ENGINES);
goto error;
}
disable_engines_irq(dev_priv);
ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
enable_engines_irq(dev_priv);
if (ret) {
if (ret != -ENODEV)
DRM_ERROR("Failed to reset chip: %i\n", ret);
@@ -1816,6 +1813,7 @@ void i915_reset(struct drm_i915_private *dev_priv)
i915_queue_hangcheck(dev_priv);
wakeup:
enable_irq(dev_priv->drm.irq);
wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS);
return;
@@ -2326,7 +2324,7 @@ static int intel_runtime_suspend(struct device *kdev)
intel_runtime_pm_disable_interrupts(dev_priv);
ret = 0;
if (IS_BROXTON(dev_priv)) {
if (IS_GEN9_LP(dev_priv)) {
bxt_display_core_uninit(dev_priv);
bxt_enable_dc9(dev_priv);
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -2411,7 +2409,7 @@ static int intel_runtime_resume(struct device *kdev)
if (IS_GEN6(dev_priv))
intel_init_pch_refclk(dev_priv);
if (IS_BROXTON(dev_priv)) {
if (IS_GEN9_LP(dev_priv)) {
bxt_disable_dc9(dev_priv);
bxt_display_core_init(dev_priv, true);
if (dev_priv->csr.dmc_payload &&
@@ -2549,8 +2547,8 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),

View File

@@ -78,8 +78,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20170109"
#define DRIVER_TIMESTAMP 1483953121
#define DRIVER_DATE "20170123"
#define DRIVER_TIMESTAMP 1485156432
#undef WARN_ON
/* Many gcc seem to not see through this and fall over :( */
@@ -1069,6 +1069,8 @@ struct intel_fbc {
struct work_struct underrun_work;
struct intel_fbc_state_cache {
struct i915_vma *vma;
struct {
unsigned int mode_flags;
uint32_t hsw_bdw_pixel_rate;
@@ -1082,15 +1084,14 @@ struct intel_fbc {
} plane;
struct {
u64 ilk_ggtt_offset;
const struct drm_format_info *format;
unsigned int stride;
int fence_reg;
unsigned int tiling_mode;
} fb;
} state_cache;
struct intel_fbc_reg_params {
struct i915_vma *vma;
struct {
enum pipe pipe;
enum plane plane;
@@ -1098,10 +1099,8 @@ struct intel_fbc {
} crtc;
struct {
u64 ggtt_offset;
const struct drm_format_info *format;
unsigned int stride;
int fence_reg;
} fb;
int cfb_size;
@@ -1154,6 +1153,9 @@ struct i915_psr {
bool psr2_support;
bool aux_frame_sync;
bool link_standby;
bool y_cord_support;
bool colorimetry_support;
bool alpm;
};
enum intel_pch {
@@ -1809,6 +1811,7 @@ struct intel_pipe_crc {
enum intel_pipe_crc_source source;
int head, tail;
wait_queue_head_t wq;
int skipped;
};
struct i915_frontbuffer_tracking {
@@ -2069,6 +2072,7 @@ struct drm_i915_private {
struct intel_gvt *gvt;
struct intel_huc huc;
struct intel_guc guc;
struct intel_csr csr;
@@ -2843,6 +2847,7 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_GUC(dev_priv) ((dev_priv)->info.has_guc)
#define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
#define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv))
#define HAS_HUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
#define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer)
@@ -3101,10 +3106,10 @@ int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_init_userptr(struct drm_i915_private *dev_priv);
int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -3323,7 +3328,7 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
return READ_ONCE(error->reset_count);
}
void i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
@@ -3360,11 +3365,6 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv, u64 size,
int tiling_mode);
u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
int tiling_mode, bool fenced);
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
@@ -3374,36 +3374,12 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags);
struct i915_vma *
i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view);
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view);
static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
return container_of(vm, struct i915_hw_ppgtt, base);
}
static inline struct i915_vma *
i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view);
}
static inline unsigned long
i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
const struct i915_ggtt_view *view)
{
return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
}
/* i915_gem_fence_reg.c */
int __must_check i915_vma_get_fence(struct i915_vma *vma);
int __must_check i915_vma_put_fence(struct i915_vma *vma);
@@ -3471,8 +3447,9 @@ int __must_check i915_gem_evict_something(struct i915_address_space *vm,
unsigned cache_level,
u64 start, u64 end,
unsigned flags);
int __must_check i915_gem_evict_for_vma(struct i915_vma *vma,
unsigned int flags);
int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
struct drm_mm_node *node,
unsigned int flags);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
/* belongs in i915_gem_gtt.h */
@@ -3506,7 +3483,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
/* i915_gem_internal.c */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
unsigned int size);
phys_addr_t size);
/* i915_gem_shrinker.c */
unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
@@ -3531,6 +3508,11 @@ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_objec
i915_gem_object_is_tiled(obj);
}
u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size,
unsigned int tiling, unsigned int stride);
u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size,
unsigned int tiling, unsigned int stride);
/* i915_debugfs.c */
#ifdef CONFIG_DEBUG_FS
int i915_debugfs_register(struct drm_i915_private *dev_priv);

View File

@@ -1696,12 +1696,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
{
u64 size;
size = i915_gem_object_get_stride(obj);
size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8;
return size >> PAGE_SHIFT;
return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}
/**
@@ -1754,6 +1749,29 @@ int i915_gem_mmap_gtt_version(void)
return 1;
}
static inline struct i915_ggtt_view
compute_partial_view(struct drm_i915_gem_object *obj,
pgoff_t page_offset,
unsigned int chunk)
{
struct i915_ggtt_view view;
if (i915_gem_object_is_tiled(obj))
chunk = roundup(chunk, tile_row_pages(obj));
view.type = I915_GGTT_VIEW_PARTIAL;
view.partial.offset = rounddown(page_offset, chunk);
view.partial.size =
min_t(unsigned int, chunk,
(obj->base.size >> PAGE_SHIFT) - view.partial.offset);
/* If the partial covers the entire object, just create a normal VMA. */
if (chunk >= obj->base.size >> PAGE_SHIFT)
view.type = I915_GGTT_VIEW_NORMAL;
return view;
}
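A worked example for compute_partial_view() (numbers illustrative, assuming MIN_CHUNK_PAGES is 1 MiB worth of pages, i.e. 256 with 4 KiB pages): a fault at page 1000 of a 4096-page Y-tiled object with a 4096-byte stride gives tile_row_pages() = 4096 * 32 / 4096 = 32 pages per tile row, so chunk stays at 256 (already a multiple of 32), rounddown(1000, 256) = 768, and min(256, 4096 - 768) = 256, i.e. the partial view maps pages [768, 1024).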
/**
* i915_gem_fault - fault a page into the GTT
* @area: CPU VMA in question
@@ -1830,26 +1848,9 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
/* Now pin it into the GTT as needed */
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
if (IS_ERR(vma)) {
struct i915_ggtt_view view;
unsigned int chunk_size;
/* Use a partial view if it is bigger than available space */
chunk_size = MIN_CHUNK_PAGES;
if (i915_gem_object_is_tiled(obj))
chunk_size = roundup(chunk_size, tile_row_pages(obj));
memset(&view, 0, sizeof(view));
view.type = I915_GGTT_VIEW_PARTIAL;
view.params.partial.offset = rounddown(page_offset, chunk_size);
view.params.partial.size =
min_t(unsigned int, chunk_size,
vma_pages(area) - view.params.partial.offset);
/* If the partial covers the entire object, just create a
* normal VMA.
*/
if (chunk_size >= obj->base.size >> PAGE_SHIFT)
view.type = I915_GGTT_VIEW_NORMAL;
struct i915_ggtt_view view =
compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
/* Userspace is now writing through an untracked VMA, abandon
* all hope that the hardware is able to track future writes.
@@ -1878,7 +1879,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
/* Finally, remap it using the new GTT offset */
ret = remap_io_mapping(area,
area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
(ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
&ggtt->mappable);
@@ -2021,69 +2022,6 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
}
}
/**
* i915_gem_get_ggtt_size - return required global GTT size for an object
* @dev_priv: i915 device
* @size: object size
* @tiling_mode: tiling mode
*
* Return the required global GTT size for an object, taking into account
* potential fence register mapping.
*/
u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
u64 size, int tiling_mode)
{
u64 ggtt_size;
GEM_BUG_ON(size == 0);
if (INTEL_GEN(dev_priv) >= 4 ||
tiling_mode == I915_TILING_NONE)
return size;
/* Previous chips need a power-of-two fence region when tiling */
if (IS_GEN3(dev_priv))
ggtt_size = 1024*1024;
else
ggtt_size = 512*1024;
while (ggtt_size < size)
ggtt_size <<= 1;
return ggtt_size;
}
/**
* i915_gem_get_ggtt_alignment - return required global GTT alignment
* @dev_priv: i915 device
* @size: object size
* @tiling_mode: tiling mode
* @fenced: is fenced alignment required or not
*
* Return the required global GTT alignment for an object, taking into account
* potential fence register mapping.
*/
u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
int tiling_mode, bool fenced)
{
GEM_BUG_ON(size == 0);
/*
* Minimum alignment is 4k (GTT page size), but might be greater
* if a fence register is needed for the object.
*/
if (INTEL_GEN(dev_priv) >= 4 ||
(!fenced && (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))) ||
tiling_mode == I915_TILING_NONE)
return 4096;
/*
* Previous chips need to be aligned to the size of the smallest
* fence register that can contain the object.
*/
return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
}
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
@@ -2666,13 +2604,52 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
if (__i915_gem_request_completed(request))
continue;
GEM_BUG_ON(request->engine != engine);
return request;
}
return NULL;
}
static void reset_request(struct drm_i915_gem_request *request)
static bool engine_stalled(struct intel_engine_cs *engine)
{
if (!engine->hangcheck.stalled)
return false;
/* Check for possible seqno movement after hang declaration */
if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) {
DRM_DEBUG_DRIVER("%s pardoned\n", engine->name);
return false;
}
return true;
}
int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = 0;
/* Ensure the irq handler finishes, and is not run again. */
for_each_engine(engine, dev_priv, id) {
struct drm_i915_gem_request *request;
tasklet_kill(&engine->irq_tasklet);
if (engine_stalled(engine)) {
request = i915_gem_find_active_request(engine);
if (request && request->fence.error == -EIO)
err = -EIO; /* Previous reset failed! */
}
}
i915_gem_revoke_fences(dev_priv);
return err;
}
static void skip_request(struct drm_i915_gem_request *request)
{
void *vaddr = request->ring->vaddr;
u32 head;
@@ -2687,20 +2664,74 @@ static void reset_request(struct drm_i915_gem_request *request)
head = 0;
}
memset(vaddr + head, 0, request->postfix - head);
dma_fence_set_error(&request->fence, -EIO);
}
void i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
static void engine_skip_context(struct drm_i915_gem_request *request)
{
i915_gem_revoke_fences(dev_priv);
struct intel_engine_cs *engine = request->engine;
struct i915_gem_context *hung_ctx = request->ctx;
struct intel_timeline *timeline;
unsigned long flags;
timeline = i915_gem_context_lookup_timeline(hung_ctx, engine);
spin_lock_irqsave(&engine->timeline->lock, flags);
spin_lock(&timeline->lock);
list_for_each_entry_continue(request, &engine->timeline->requests, link)
if (request->ctx == hung_ctx)
skip_request(request);
list_for_each_entry(request, &timeline->requests, link)
skip_request(request);
spin_unlock(&timeline->lock);
spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
/* Returns true if the request was guilty of hang */
static bool i915_gem_reset_request(struct drm_i915_gem_request *request)
{
/* Read once and return the resolution */
const bool guilty = engine_stalled(request->engine);
/* The guilty request will get skipped on a hung engine.
*
* Users of client default contexts do not rely on logical
* state preserved between batches so it is safe to execute
* queued requests following the hang. Non default contexts
* rely on preserved state, so skipping a batch loses the
* evolution of the state and it needs to be considered corrupted.
* Executing more queued batches on top of corrupted state is
* risky. But we take the risk by trying to advance through
* the queued requests in order to make the client behaviour
* more predictable around resets, by not throwing away a random
* amount of batches it has prepared for execution. Sophisticated
* clients can use gem_reset_stats_ioctl and dma fence status
* (exported via the sync_file info ioctl on explicit fences) to observe
* when they lose the context state and should rebuild accordingly.
*
* The context ban, and ultimately the client ban, mechanism are safety
* valves if client submission ends up resulting in nothing more than
* subsequent hangs.
*/
if (guilty) {
i915_gem_context_mark_guilty(request->ctx);
skip_request(request);
} else {
i915_gem_context_mark_innocent(request->ctx);
dma_fence_set_error(&request->fence, -EAGAIN);
}
return guilty;
}
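The observation path named in the comment above can be sketched from userspace as follows (illustrative only; it assumes an explicit out-fence fd, e.g. from execbuf, plus the sync_file uapi, and the helper name is made up):

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/sync_file.h>

/* Hypothetical helper: returns the fence status for an explicit fence fd:
 * a negative error (e.g. -EIO for a skipped request, -EAGAIN for an
 * innocent one caught up in a reset), 0 while still pending, or 1 once
 * signalled without error.
 */
static int explicit_fence_status(int fence_fd)
{
	struct sync_file_info info;

	memset(&info, 0, sizeof(info));
	if (ioctl(fence_fd, SYNC_IOC_FILE_INFO, &info))
		return -errno;

	return info.status;
}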
static void i915_gem_reset_engine(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request;
struct i915_gem_context *hung_ctx;
struct intel_timeline *timeline;
unsigned long flags;
bool ring_hung;
if (engine->irq_seqno_barrier)
engine->irq_seqno_barrier(engine);
@@ -2709,22 +2740,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
if (!request)
return;
hung_ctx = request->ctx;
ring_hung = engine->hangcheck.stalled;
if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) {
DRM_DEBUG_DRIVER("%s pardoned, was guilty? %s\n",
engine->name,
yesno(ring_hung));
ring_hung = false;
}
if (ring_hung)
i915_gem_context_mark_guilty(hung_ctx);
else
i915_gem_context_mark_innocent(hung_ctx);
if (!ring_hung)
if (!i915_gem_reset_request(request))
return;
DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
@@ -2734,34 +2750,8 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
engine->reset_hw(engine, request);
/* If this context is now banned, skip all of its pending requests. */
if (!i915_gem_context_is_banned(hung_ctx))
return;
/* Users of the default context do not rely on logical state
* preserved between batches. They have to emit full state on
* every batch and so it is safe to execute queued requests following
* the hang.
*
* Other contexts preserve state, now corrupt. We want to skip all
* queued requests that reference the corrupt context.
*/
if (i915_gem_context_is_default(hung_ctx))
return;
timeline = i915_gem_context_lookup_timeline(hung_ctx, engine);
spin_lock_irqsave(&engine->timeline->lock, flags);
spin_lock(&timeline->lock);
list_for_each_entry_continue(request, &engine->timeline->requests, link)
if (request->ctx == hung_ctx)
reset_request(request);
list_for_each_entry(request, &timeline->requests, link)
reset_request(request);
spin_unlock(&timeline->lock);
spin_unlock_irqrestore(&engine->timeline->lock, flags);
if (i915_gem_context_is_banned(request->ctx))
engine_skip_context(request);
}
void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
@@ -2788,12 +2778,16 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
static void nop_submit_request(struct drm_i915_gem_request *request)
{
dma_fence_set_error(&request->fence, -EIO);
i915_gem_request_submit(request);
intel_engine_init_global_seqno(request->engine, request->global_seqno);
}
static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
static void engine_set_wedged(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request;
unsigned long flags;
/* We need to be sure that no thread is running the old callback as
* we install the nop handler (otherwise we would submit a request
* to hardware that will never complete). In order to prevent this
@@ -2802,6 +2796,12 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
*/
engine->submit_request = nop_submit_request;
/* Mark all executing requests as skipped */
spin_lock_irqsave(&engine->timeline->lock, flags);
list_for_each_entry(request, &engine->timeline->requests, link)
dma_fence_set_error(&request->fence, -EIO);
spin_unlock_irqrestore(&engine->timeline->lock, flags);
/* Mark all pending requests as complete so that any concurrent
* (lockless) lookup doesn't try and wait upon the request as we
* reset it.
@@ -2837,7 +2837,7 @@ static int __i915_gem_set_wedged_BKL(void *data)
enum intel_engine_id id;
for_each_engine(engine, i915, id)
i915_gem_cleanup_engine(engine);
engine_set_wedged(engine);
return 0;
}
@@ -3397,7 +3397,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
enum i915_cache_level level;
int ret;
int ret = 0;
switch (args->caching) {
case I915_CACHING_NONE:
@@ -3422,20 +3422,29 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
if (obj->cache_level == level)
goto out;
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT,
to_rps_client(file));
if (ret)
goto out;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
obj = i915_gem_object_lookup(file, args->handle);
if (!obj) {
ret = -ENOENT;
goto unlock;
}
goto out;
ret = i915_gem_object_set_cache_level(obj, level);
i915_gem_object_put(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
out:
i915_gem_object_put(obj);
return ret;
}
@@ -3485,7 +3494,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* try to preserve the existing ABI).
*/
vma = ERR_PTR(-ENOSPC);
if (view->type == I915_GGTT_VIEW_NORMAL)
if (!view || view->type == I915_GGTT_VIEW_NORMAL)
vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
PIN_MAPPABLE | PIN_NONBLOCK);
if (IS_ERR(vma)) {
@@ -3544,11 +3553,10 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
return;
if (--vma->obj->pin_display == 0)
vma->display_alignment = 0;
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
/* Bump the LRU to try and avoid premature eviction whilst flipping */
if (!i915_vma_is_active(vma))
list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
i915_gem_object_bump_inactive_ggtt(vma->obj);
i915_vma_unpin(vma);
}
@@ -3679,8 +3687,8 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
lockdep_assert_held(&obj->base.dev->struct_mutex);
vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
if (IS_ERR(vma))
vma = i915_vma_instance(obj, vm, view);
if (unlikely(IS_ERR(vma)))
return vma;
if (i915_vma_misplaced(vma, size, alignment, flags)) {
@@ -3689,10 +3697,6 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
return ERR_PTR(-ENOSPC);
if (flags & PIN_MAPPABLE) {
u32 fence_size;
fence_size = i915_gem_get_ggtt_size(dev_priv, vma->size,
i915_gem_object_get_tiling(obj));
/* If the required space is larger than the available
* aperture, we will not be able to find a slot for the
* object and unbinding the object now will be in
@@ -3700,7 +3704,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
* the object in and out of the Global GTT and
* waste a lot of cycles under the mutex.
*/
if (fence_size > dev_priv->ggtt.mappable_end)
if (vma->fence_size > dev_priv->ggtt.mappable_end)
return ERR_PTR(-E2BIG);
/* If NONBLOCK is set the caller is optimistically
@@ -3719,7 +3723,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
* we could try to minimise harm to others.
*/
if (flags & PIN_NONBLOCK &&
fence_size > dev_priv->ggtt.mappable_end / 2)
vma->fence_size > dev_priv->ggtt.mappable_end / 2)
return ERR_PTR(-ENOSPC);
}
@@ -4193,7 +4197,8 @@ static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
enum intel_engine_id id;
for_each_engine(engine, dev_priv, id)
GEM_BUG_ON(!i915_gem_context_is_kernel(engine->last_retired_context));
GEM_BUG_ON(engine->last_retired_context &&
!i915_gem_context_is_kernel(engine->last_retired_context));
}
int i915_gem_suspend(struct drm_i915_private *dev_priv)

View File

@@ -97,7 +97,7 @@
* part. It should be safe to decrease this, but it's more future proof as is.
*/
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096
#define GEN7_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT
static size_t get_context_alignment(struct drm_i915_private *dev_priv)
{
@@ -205,27 +205,6 @@ alloc_context_obj(struct drm_i915_private *dev_priv, u64 size)
return obj;
}
static void i915_ppgtt_close(struct i915_address_space *vm)
{
struct list_head *phases[] = {
&vm->active_list,
&vm->inactive_list,
&vm->unbound_list,
NULL,
}, **phase;
GEM_BUG_ON(vm->closed);
vm->closed = true;
for (phase = phases; *phase; phase++) {
struct i915_vma *vma, *vn;
list_for_each_entry_safe(vma, vn, *phase, vm_link)
if (!i915_vma_is_closed(vma))
i915_vma_close(vma);
}
}
static void context_close(struct i915_gem_context *ctx)
{
i915_gem_context_set_closed(ctx);
@@ -290,7 +269,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
goto err_out;
}
vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
if (IS_ERR(vma)) {
i915_gem_object_put(obj);
ret = PTR_ERR(vma);
@@ -341,7 +320,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
if (HAS_GUC(dev_priv) && i915.enable_guc_loading)
ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
else
ctx->ggtt_offset_bias = 4096;
ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;
return ctx;
@@ -456,7 +435,8 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
dev_priv->hw_context_size = 0;
} else if (HAS_HW_CONTEXTS(dev_priv)) {
dev_priv->hw_context_size =
round_up(get_context_size(dev_priv), 4096);
round_up(get_context_size(dev_priv),
I915_GTT_PAGE_SIZE);
if (dev_priv->hw_context_size > (1<<20)) {
DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
dev_priv->hw_context_size);
@@ -897,6 +877,26 @@ int i915_switch_context(struct drm_i915_gem_request *req)
return do_rcs_switch(req);
}
static bool engine_has_kernel_context(struct intel_engine_cs *engine)
{
struct i915_gem_timeline *timeline;
list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
struct intel_timeline *tl;
if (timeline == &engine->i915->gt.global_timeline)
continue;
tl = &timeline->engine[engine->id];
if (i915_gem_active_peek(&tl->last_request,
&engine->i915->drm.struct_mutex))
return false;
}
return (!engine->last_retired_context ||
i915_gem_context_is_kernel(engine->last_retired_context));
}
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
@@ -905,10 +905,15 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
lockdep_assert_held(&dev_priv->drm.struct_mutex);
i915_gem_retire_requests(dev_priv);
for_each_engine(engine, dev_priv, id) {
struct drm_i915_gem_request *req;
int ret;
if (engine_has_kernel_context(engine))
continue;
req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
if (IS_ERR(req))
return PTR_ERR(req);

View File

@@ -231,7 +231,8 @@ found:
/**
* i915_gem_evict_for_vma - Evict vmas to make room for binding a new one
* @target: address space and range to evict for
* @vm: address space to evict from
* @target: range (and color) to evict for
* @flags: additional flags to control the eviction algorithm
*
* This function will try to evict vmas that overlap the target node.
@@ -239,18 +240,20 @@ found:
* To clarify: This is for freeing up virtual address space, not for freeing
* memory in e.g. the shrinker.
*/
int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags)
int i915_gem_evict_for_node(struct i915_address_space *vm,
struct drm_mm_node *target,
unsigned int flags)
{
LIST_HEAD(eviction_list);
struct drm_mm_node *node;
u64 start = target->node.start;
u64 end = start + target->node.size;
u64 start = target->start;
u64 end = start + target->size;
struct i915_vma *vma, *next;
bool check_color;
int ret = 0;
lockdep_assert_held(&target->vm->i915->drm.struct_mutex);
trace_i915_gem_evict_vma(target, flags);
lockdep_assert_held(&vm->i915->drm.struct_mutex);
trace_i915_gem_evict_node(vm, target, flags);
/* Retire before we search the active list. Although we have
* reasonable accuracy in our retirement lists, we may have
@@ -258,18 +261,18 @@ int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags)
* retiring.
*/
if (!(flags & PIN_NONBLOCK))
i915_gem_retire_requests(target->vm->i915);
i915_gem_retire_requests(vm->i915);
check_color = target->vm->mm.color_adjust;
check_color = vm->mm.color_adjust;
if (check_color) {
/* Expand search to cover neighbouring guard pages (or lack!) */
if (start > target->vm->start)
start -= 4096;
if (end < target->vm->start + target->vm->total)
end += 4096;
if (start > vm->start)
start -= I915_GTT_PAGE_SIZE;
if (end < vm->start + vm->total)
end += I915_GTT_PAGE_SIZE;
}
drm_mm_for_each_node_in_range(node, &target->vm->mm, start, end) {
drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
/* If we find any non-objects (!vma), we cannot evict them */
if (node->color == I915_COLOR_UNEVICTABLE) {
ret = -ENOSPC;
@@ -285,12 +288,12 @@ int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags)
* those as well to make room for our guard pages.
*/
if (check_color) {
if (vma->node.start + vma->node.size == target->node.start) {
if (vma->node.color == target->node.color)
if (vma->node.start + vma->node.size == node->start) {
if (vma->node.color == node->color)
continue;
}
if (vma->node.start == target->node.start + target->node.size) {
if (vma->node.color == target->node.color)
if (vma->node.start == node->start + node->size) {
if (vma->node.color == node->color)
continue;
}
}
@@ -302,7 +305,7 @@ int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags)
}
/* Overlap of objects in the same batch? */
if (i915_vma_is_pinned(vma)) {
if (i915_vma_is_pinned(vma) || !list_empty(&vma->exec_list)) {
ret = -ENOSPC;
if (vma->exec_entry &&
vma->exec_entry->flags & EXEC_OBJECT_PINNED)

View File

@@ -184,7 +184,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
* from the (obj, vm) we don't run the risk of creating
* duplicated vmas for the same vm.
*/
vma = i915_gem_obj_lookup_or_create_vma(obj, vm, NULL);
vma = i915_vma_instance(obj, vm, NULL);
if (unlikely(IS_ERR(vma))) {
DRM_DEBUG("Failed to lookup VMA\n");
ret = PTR_ERR(vma);
@@ -438,7 +438,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
memset(&cache->node, 0, sizeof(cache->node));
ret = drm_mm_insert_node_in_range_generic
(&ggtt->base.mm, &cache->node,
4096, 0, I915_COLOR_UNEVICTABLE,
PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_SEARCH_DEFAULT,
DRM_MM_CREATE_DEFAULT);
@@ -851,8 +851,7 @@ eb_vma_misplaced(struct i915_vma *vma)
WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
!i915_vma_is_ggtt(vma));
if (entry->alignment &&
vma->node.start & (entry->alignment - 1))
if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
return true;
if (vma->node.size < entry->pad_to_size)

View File

@@ -77,16 +77,17 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
val = 0;
if (vma) {
unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
bool is_y_tiled = tiling == I915_TILING_Y;
unsigned int stride = i915_gem_object_get_stride(vma->obj);
u32 row_size = stride * (is_y_tiled ? 32 : 8);
u32 size = rounddown((u32)vma->node.size, row_size);
val = ((vma->node.start + size - 4096) & 0xfffff000) << 32;
val |= vma->node.start & 0xfffff000;
GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
GEM_BUG_ON(!IS_ALIGNED(vma->node.start, I965_FENCE_PAGE));
GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I965_FENCE_PAGE));
GEM_BUG_ON(!IS_ALIGNED(stride, 128));
val = (vma->node.start + vma->fence_size - I965_FENCE_PAGE) << 32;
val |= vma->node.start;
val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
if (is_y_tiled)
if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
val |= BIT(I965_FENCE_TILING_Y_SHIFT);
val |= I965_FENCE_REG_VALID;
}
@@ -122,31 +123,24 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *fence,
unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
bool is_y_tiled = tiling == I915_TILING_Y;
unsigned int stride = i915_gem_object_get_stride(vma->obj);
int pitch_val;
int tile_width;
WARN((vma->node.start & ~I915_FENCE_START_MASK) ||
!is_power_of_2(vma->node.size) ||
(vma->node.start & (vma->node.size - 1)),
"object 0x%08llx [fenceable? %d] not 1M or pot-size (0x%08llx) aligned\n",
vma->node.start,
i915_vma_is_map_and_fenceable(vma),
vma->node.size);
GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
GEM_BUG_ON(vma->node.start & ~I915_FENCE_START_MASK);
GEM_BUG_ON(!is_power_of_2(vma->fence_size));
GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915))
tile_width = 128;
stride /= 128;
else
tile_width = 512;
/* Note: pitch better be a power of two tile widths */
pitch_val = stride / tile_width;
pitch_val = ffs(pitch_val) - 1;
stride /= 512;
GEM_BUG_ON(!is_power_of_2(stride));
val = vma->node.start;
if (is_y_tiled)
val |= BIT(I830_FENCE_TILING_Y_SHIFT);
val |= I915_FENCE_SIZE_BITS(vma->node.size);
val |= pitch_val << I830_FENCE_PITCH_SHIFT;
val |= I915_FENCE_SIZE_BITS(vma->fence_size);
val |= ilog2(stride) << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
}
@@ -166,25 +160,19 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *fence,
val = 0;
if (vma) {
unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
bool is_y_tiled = tiling == I915_TILING_Y;
unsigned int stride = i915_gem_object_get_stride(vma->obj);
u32 pitch_val;
WARN((vma->node.start & ~I830_FENCE_START_MASK) ||
!is_power_of_2(vma->node.size) ||
(vma->node.start & (vma->node.size - 1)),
"object 0x%08llx not 512K or pot-size 0x%08llx aligned\n",
vma->node.start, vma->node.size);
pitch_val = stride / 128;
pitch_val = ffs(pitch_val) - 1;
GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
GEM_BUG_ON(vma->node.start & ~I830_FENCE_START_MASK);
GEM_BUG_ON(!is_power_of_2(vma->fence_size));
GEM_BUG_ON(!is_power_of_2(stride / 128));
GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
val = vma->node.start;
if (is_y_tiled)
if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
val |= BIT(I830_FENCE_TILING_Y_SHIFT);
val |= I830_FENCE_SIZE_BITS(vma->node.size);
val |= pitch_val << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_SIZE_BITS(vma->fence_size);
val |= ilog2(stride / 128) << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
}

View File

@@ -30,6 +30,8 @@
struct drm_i915_private;
struct i915_vma;
#define I965_FENCE_PAGE 4096UL
struct drm_i915_fence_reg {
struct list_head link;
struct drm_i915_private *i915;

View File

@@ -23,10 +23,14 @@
*
*/
#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
@@ -99,12 +103,29 @@
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);
const struct i915_ggtt_view i915_ggtt_view_normal = {
.type = I915_GGTT_VIEW_NORMAL,
};
const struct i915_ggtt_view i915_ggtt_view_rotated = {
.type = I915_GGTT_VIEW_ROTATED,
};
static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
/* Note that as an uncached mmio write, this should flush the
* WCB of the writes into the GGTT before it triggers the invalidate.
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}
static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
gen6_ggtt_invalidate(dev_priv);
I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}
static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
intel_gtt_chipset_flush();
}
static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
{
i915->ggtt.invalidate(i915);
}
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
int enable_ppgtt)
@@ -329,7 +350,7 @@ static int __setup_page_dma(struct drm_i915_private *dev_priv,
return -ENOMEM;
p->daddr = dma_map_page(kdev,
p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
p->page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (dma_mapping_error(kdev, p->daddr)) {
__free_page(p->page);
@@ -353,7 +374,7 @@ static void cleanup_page_dma(struct drm_i915_private *dev_priv,
if (WARN_ON(!p->page))
return;
dma_unmap_page(&pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
dma_unmap_page(&pdev->dev, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
__free_page(p->page);
memset(p, 0, sizeof(*p));
}
@@ -626,10 +647,10 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
}
static void
gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
struct i915_page_directory_pointer *pdp,
struct i915_page_directory *pd,
int index)
gen8_setup_pdpe(struct i915_hw_ppgtt *ppgtt,
struct i915_page_directory_pointer *pdp,
struct i915_page_directory *pd,
int index)
{
gen8_ppgtt_pdpe_t *page_directorypo;
@@ -642,10 +663,10 @@ gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
}
static void
gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt,
struct i915_pml4 *pml4,
struct i915_page_directory_pointer *pdp,
int index)
gen8_setup_pml4e(struct i915_hw_ppgtt *ppgtt,
struct i915_pml4 *pml4,
struct i915_page_directory_pointer *pdp,
int index)
{
gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);
@@ -793,9 +814,6 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_directory *pd;
uint64_t pdpe;
gen8_ppgtt_pdpe_t *pdpe_vaddr;
gen8_ppgtt_pdpe_t scratch_pdpe =
gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
if (WARN_ON(!pdp->page_directory[pdpe]))
@@ -803,11 +821,7 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
if (gen8_ppgtt_clear_pd(vm, pd, start, length)) {
__clear_bit(pdpe, pdp->used_pdpes);
if (USES_FULL_48BIT_PPGTT(dev_priv)) {
pdpe_vaddr = kmap_px(pdp);
pdpe_vaddr[pdpe] = scratch_pdpe;
kunmap_px(ppgtt, pdpe_vaddr);
}
gen8_setup_pdpe(ppgtt, pdp, vm->scratch_pd, pdpe);
free_pd(vm->i915, pd);
}
}
@@ -832,9 +846,6 @@ static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_directory_pointer *pdp;
uint64_t pml4e;
gen8_ppgtt_pml4e_t *pml4e_vaddr;
gen8_ppgtt_pml4e_t scratch_pml4e =
gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC);
GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(vm->i915));
@@ -844,9 +855,7 @@ static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
if (gen8_ppgtt_clear_pdp(vm, pdp, start, length)) {
__clear_bit(pml4e, pml4->used_pml4es);
pml4e_vaddr = kmap_px(pml4);
pml4e_vaddr[pml4e] = scratch_pml4e;
kunmap_px(ppgtt, pml4e_vaddr);
gen8_setup_pml4e(ppgtt, pml4, vm->scratch_pdp, pml4e);
free_pdp(vm->i915, pdp);
}
}
@@ -1366,7 +1375,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
kunmap_px(ppgtt, page_directory);
__set_bit(pdpe, pdp->used_pdpes);
gen8_setup_page_directory(ppgtt, pdp, pd, pdpe);
gen8_setup_pdpe(ppgtt, pdp, pd, pdpe);
}
free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
@@ -1425,7 +1434,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
if (ret)
goto err_out;
gen8_setup_page_directory_pointer(ppgtt, pml4, pdp, pml4e);
gen8_setup_pml4e(ppgtt, pml4, pdp, pml4e);
}
bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es,
@@ -2044,7 +2053,6 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
struct i915_address_space *vm = &ppgtt->base;
struct drm_i915_private *dev_priv = ppgtt->base.i915;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool retried = false;
int ret;
/* PPGTT PDEs reside in the GGTT and consists of 512 entries. The
@@ -2057,29 +2065,14 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
if (ret)
return ret;
alloc:
ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm, &ppgtt->node,
GEN6_PD_SIZE, GEN6_PD_ALIGN,
I915_COLOR_UNEVICTABLE,
0, ggtt->base.total,
DRM_MM_TOPDOWN);
if (ret == -ENOSPC && !retried) {
ret = i915_gem_evict_something(&ggtt->base,
GEN6_PD_SIZE, GEN6_PD_ALIGN,
I915_COLOR_UNEVICTABLE,
0, ggtt->base.total,
0);
if (ret)
goto err_out;
retried = true;
goto alloc;
}
ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
GEN6_PD_SIZE, GEN6_PD_ALIGN,
I915_COLOR_UNEVICTABLE,
0, ggtt->base.total,
PIN_HIGH);
if (ret)
goto err_out;
if (ppgtt->node.start < ggtt->mappable_end)
DRM_DEBUG("Forced to use aperture for PDEs\n");
@@ -2267,6 +2260,27 @@ i915_ppgtt_create(struct drm_i915_private *dev_priv,
return ppgtt;
}
void i915_ppgtt_close(struct i915_address_space *vm)
{
struct list_head *phases[] = {
&vm->active_list,
&vm->inactive_list,
&vm->unbound_list,
NULL,
}, **phase;
GEM_BUG_ON(vm->closed);
vm->closed = true;
for (phase = phases; *phase; phase++) {
struct i915_vma *vma, *vn;
list_for_each_entry_safe(vma, vn, *phase, vm_link)
if (!i915_vma_is_closed(vma))
i915_vma_close(vma);
}
}
void i915_ppgtt_release(struct kref *kref)
{
struct i915_hw_ppgtt *ppgtt =
@@ -2331,16 +2345,6 @@ void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
}
static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
{
if (INTEL_INFO(dev_priv)->gen < 6) {
intel_gtt_chipset_flush();
} else {
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
}
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
@@ -2355,7 +2359,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);
i915_ggtt_flush(dev_priv);
i915_ggtt_invalidate(dev_priv);
}
int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
@@ -2394,15 +2398,13 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
enum i915_cache_level level,
u32 unused)
{
struct drm_i915_private *dev_priv = vm->i915;
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen8_pte_t __iomem *pte =
(gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
(offset >> PAGE_SHIFT);
(gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
gen8_set_pte(pte, gen8_pte_encode(addr, level));
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
ggtt->invalidate(vm->i915);
}
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
@@ -2410,7 +2412,6 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
uint64_t start,
enum i915_cache_level level, u32 unused)
{
struct drm_i915_private *dev_priv = vm->i915;
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
struct sgt_iter sgt_iter;
gen8_pte_t __iomem *gtt_entries;
@@ -2439,8 +2440,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
* want to flush the TLBs only after we're certain all the PTE updates
* have finished.
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
ggtt->invalidate(vm->i915);
}
struct insert_entries {
@@ -2475,15 +2475,13 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
enum i915_cache_level level,
u32 flags)
{
struct drm_i915_private *dev_priv = vm->i915;
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen6_pte_t __iomem *pte =
(gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
(offset >> PAGE_SHIFT);
(gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
iowrite32(vm->pte_encode(addr, level, flags), pte);
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
ggtt->invalidate(vm->i915);
}
/*
@@ -2497,7 +2495,6 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
uint64_t start,
enum i915_cache_level level, u32 flags)
{
struct drm_i915_private *dev_priv = vm->i915;
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
struct sgt_iter sgt_iter;
gen6_pte_t __iomem *gtt_entries;
@@ -2525,8 +2522,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
* want to flush the TLBs only after we're certain all the PTE updates
* have finished.
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
ggtt->invalidate(vm->i915);
}
static void nop_clear_range(struct i915_address_space *vm,
@@ -2723,11 +2719,11 @@ static void i915_gtt_color_adjust(const struct drm_mm_node *node,
u64 *end)
{
if (node->color != color)
*start += 4096;
*start += I915_GTT_PAGE_SIZE;
node = list_next_entry(node, node_list);
if (node->allocated && node->color != color)
*end -= 4096;
*end -= I915_GTT_PAGE_SIZE;
}
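In other words (a worked example with 4 KiB GTT pages): if the node immediately before the search position has a different cache color, the usable range start is bumped up by one page, and if the allocated node following it differs in color the end is pulled in by one page, so every color transition keeps at least one page of guard space. This is also why i915_gem_evict_for_node() earlier in this diff widens its search by I915_GTT_PAGE_SIZE on each side when color adjustment is active.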
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
@@ -2754,7 +2750,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
/* Reserve a mappable slot for our lockless error capture */
ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
&ggtt->error_capture,
4096, 0,
PAGE_SIZE, 0,
I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
0, 0);
@@ -3086,6 +3082,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
if (IS_CHERRYVIEW(dev_priv))
ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
ggtt->invalidate = gen6_ggtt_invalidate;
return ggtt_probe_common(ggtt, size);
}
@@ -3123,6 +3121,8 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
ggtt->base.unbind_vma = ggtt_unbind_vma;
ggtt->base.cleanup = gen6_gmch_remove;
ggtt->invalidate = gen6_ggtt_invalidate;
if (HAS_EDRAM(dev_priv))
ggtt->base.pte_encode = iris_pte_encode;
else if (IS_HASWELL(dev_priv))
@@ -3166,6 +3166,8 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
ggtt->base.unbind_vma = ggtt_unbind_vma;
ggtt->base.cleanup = i915_gmch_remove;
ggtt->invalidate = gmch_ggtt_invalidate;
if (unlikely(ggtt->do_idle_maps))
DRM_INFO("applying Ironlake quirks for intel_iommu\n");
@@ -3284,6 +3286,16 @@ int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
return 0;
}
void i915_ggtt_enable_guc(struct drm_i915_private *i915)
{
i915->ggtt.invalidate = guc_ggtt_invalidate;
}
void i915_ggtt_disable_guc(struct drm_i915_private *i915)
{
i915->ggtt.invalidate = gen6_ggtt_invalidate;
}
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
@@ -3347,52 +3359,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
}
}
i915_ggtt_flush(dev_priv);
}
struct i915_vma *
i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
struct rb_node *rb;
rb = obj->vma_tree.rb_node;
while (rb) {
struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
long cmp;
cmp = i915_vma_compare(vma, vm, view);
if (cmp == 0)
return vma;
if (cmp < 0)
rb = rb->rb_right;
else
rb = rb->rb_left;
}
return NULL;
}
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
struct i915_vma *vma;
lockdep_assert_held(&obj->base.dev->struct_mutex);
GEM_BUG_ON(view && !i915_is_ggtt(vm));
vma = i915_gem_obj_to_vma(obj, vm, view);
if (!vma) {
vma = i915_vma_create(obj, vm, view);
GEM_BUG_ON(vma != i915_gem_obj_to_vma(obj, vm, view));
}
GEM_BUG_ON(i915_vma_is_closed(vma));
return vma;
i915_ggtt_invalidate(dev_priv);
}
static struct scatterlist *
@@ -3492,7 +3459,7 @@ intel_partial_pages(const struct i915_ggtt_view *view,
{
struct sg_table *st;
struct scatterlist *sg, *iter;
unsigned int count = view->params.partial.size;
unsigned int count = view->partial.size;
unsigned int offset;
int ret = -ENOMEM;
@@ -3504,9 +3471,7 @@ intel_partial_pages(const struct i915_ggtt_view *view,
if (ret)
goto err_sg_alloc;
iter = i915_gem_object_get_sg(obj,
view->params.partial.offset,
&offset);
iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
GEM_BUG_ON(!iter);
sg = st->sgl;
@@ -3558,7 +3523,8 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
vma->pages = vma->obj->mm.pages;
else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
vma->pages =
intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
intel_rotate_fb_obj_pages(&vma->ggtt_view.rotated,
vma->obj);
else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
else
@@ -3579,3 +3545,207 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
return ret;
}
/**
* i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
* @vm: the &struct i915_address_space
* @node: the &struct drm_mm_node (typically i915_vma.node)
* @size: how much space to allocate inside the GTT,
* must be #I915_GTT_PAGE_SIZE aligned
* @offset: where to insert inside the GTT,
* must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
* (@offset + @size) must fit within the address space
* @color: color to apply to node, if this node is not from a VMA,
* color must be #I915_COLOR_UNEVICTABLE
* @flags: control search and eviction behaviour
*
* i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
* the address space (using @size and @color). If the @node does not fit, it
* tries to evict any overlapping nodes from the GTT, including any
* neighbouring nodes if the colors do not match (to ensure guard pages between
* differing domains). See i915_gem_evict_for_node() for the gory details
* on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
* evicting active overlapping objects, and any overlapping node that is pinned
* or marked as unevictable will also result in failure.
*
* Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
* asked to wait for eviction and interrupted.
*/
int i915_gem_gtt_reserve(struct i915_address_space *vm,
struct drm_mm_node *node,
u64 size, u64 offset, unsigned long color,
unsigned int flags)
{
int err;
GEM_BUG_ON(!size);
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
GEM_BUG_ON(range_overflows(offset, size, vm->total));
GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
GEM_BUG_ON(drm_mm_node_allocated(node));
node->size = size;
node->start = offset;
node->color = color;
err = drm_mm_reserve_node(&vm->mm, node);
if (err != -ENOSPC)
return err;
err = i915_gem_evict_for_node(vm, node, flags);
if (err == 0)
err = drm_mm_reserve_node(&vm->mm, node);
return err;
}
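/* Illustrative only (not part of this patch): a hypothetical caller that
 * reserves 64 KiB at a fixed GGTT offset via the API documented above.
 * The function name and constants are invented for the example, and
 * struct_mutex is assumed to be held, as drm_mm manipulation requires.
 */
static int example_reserve_fixed(struct i915_address_space *vm,
                                 struct drm_mm_node *node)
{
        return i915_gem_gtt_reserve(vm, node,
                                    SZ_64K, /* size, I915_GTT_PAGE_SIZE aligned */
                                    SZ_1M,  /* exact offset to pin at */
                                    I915_COLOR_UNEVICTABLE,
                                    PIN_NONBLOCK); /* fail rather than wait */
}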
static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
{
u64 range, addr;
GEM_BUG_ON(range_overflows(start, len, end));
GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));
range = round_down(end - len, align) - round_up(start, align);
if (range) {
if (sizeof(unsigned long) == sizeof(u64)) {
addr = get_random_long();
} else {
addr = get_random_int();
if (range > U32_MAX) {
addr <<= 32;
addr |= get_random_int();
}
}
div64_u64_rem(addr, range, &addr);
start += addr;
}
return round_up(start, align);
}
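/* Worked example (illustrative, userspace re-derivation of the range math
 * above, not kernel code): with start=0, end=1MiB, len=64KiB and
 * align=4KiB, the candidate window is [0, 983040] and the returned offset
 * is always 4KiB aligned.
 */
#include <stdint.h>
#include <stdio.h>

#define round_up(x, a)   ((((x) + (a) - 1) / (a)) * (a))
#define round_down(x, a) (((x) / (a)) * (a))

int main(void)
{
        uint64_t start = 0, end = 1ull << 20, len = 64 << 10, align = 4 << 10;
        uint64_t range = round_down(end - len, align) - round_up(start, align);

        printf("range=%llu\n", (unsigned long long)range); /* prints 983040 */
        return 0;
}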
/**
* i915_gem_gtt_insert - insert a node into an address_space (GTT)
* @vm: the &struct i915_address_space
* @node: the &struct drm_mm_node (typically i915_vma.node)
* @size: how much space to allocate inside the GTT,
* must be #I915_GTT_PAGE_SIZE aligned
* @alignment: required alignment of starting offset, may be 0 but
* if specified, this must be a power-of-two and at least
* #I915_GTT_MIN_ALIGNMENT
* @color: color to apply to node
* @start: start of any range restriction inside GTT (0 for all),
* must be #I915_GTT_PAGE_SIZE aligned
* @end: end of any range restriction inside GTT (U64_MAX for all),
* must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
* @flags: control search and eviction behaviour
*
* i915_gem_gtt_insert() first searches for an available hole into which
* it can insert the node. The hole address is aligned to @alignment and
* its @size must then fit entirely within the [@start, @end] bounds. The
* nodes on either side of the hole must match @color, or else a guard page
* will be inserted between the two nodes (or the node evicted). If no
* suitable hole is found, first a victim is randomly selected and tested
* for eviction, and failing that the LRU list of objects within the GTT
* is scanned to find the first set of replacement nodes to create the hole.
* Those old overlapping nodes are evicted from the GTT (and so must be
* rebound before any future use). Any node that is currently pinned cannot
* be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
* active and #PIN_NONBLOCK is specified, that node is also skipped when
* searching for an eviction candidate. See i915_gem_evict_something() for
* the gory details on the eviction algorithm.
*
* Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
* asked to wait for eviction and interrupted.
*/
int i915_gem_gtt_insert(struct i915_address_space *vm,
struct drm_mm_node *node,
u64 size, u64 alignment, unsigned long color,
u64 start, u64 end, unsigned int flags)
{
u32 search_flag, alloc_flag;
u64 offset;
int err;
lockdep_assert_held(&vm->i915->drm.struct_mutex);
GEM_BUG_ON(!size);
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(alignment && !is_power_of_2(alignment));
GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
GEM_BUG_ON(start >= end);
GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
GEM_BUG_ON(drm_mm_node_allocated(node));
if (unlikely(range_overflows(start, size, end)))
return -ENOSPC;
if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
return -ENOSPC;
if (flags & PIN_HIGH) {
search_flag = DRM_MM_SEARCH_BELOW;
alloc_flag = DRM_MM_CREATE_TOP;
} else {
search_flag = DRM_MM_SEARCH_DEFAULT;
alloc_flag = DRM_MM_CREATE_DEFAULT;
}
/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
* so we know that we always have a minimum alignment of 4096.
* The drm_mm range manager is optimised to return results
* with zero alignment, so where possible use the optimal
* path.
*/
BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
if (alignment <= I915_GTT_MIN_ALIGNMENT)
alignment = 0;
err = drm_mm_insert_node_in_range_generic(&vm->mm, node,
size, alignment, color,
start, end,
search_flag, alloc_flag);
if (err != -ENOSPC)
return err;
/* No free space, pick a slot at random.
*
* There is a pathological case here using a GTT shared between
* mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
*
* |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
* (64k objects) (448k objects)
*
* Now imagine that the eviction LRU is ordered top-down (just because
* pathology meets real life), and that we need to evict an object to
* make room inside the aperture. The eviction scan then has to walk
* the 448k list before it finds one within range. And now imagine that
* it has to search for a new hole between every byte inside the memcpy,
* for several simultaneous clients.
*
* On a full-ppgtt system, if we have run out of available space, there
* will be lots and lots of objects in the eviction list! Again,
* searching that LRU list may be slow if we are also applying any
* range restrictions (e.g. restriction to low 4GiB) and so, for
* simplicity and similarity between the different GTTs, try a single
* random replacement first.
*/
offset = random_offset(start, end,
size, alignment ?: I915_GTT_MIN_ALIGNMENT);
err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
if (err != -ENOSPC)
return err;
/* Randomly selected placement is pinned, do a search */
err = i915_gem_evict_something(vm, size, alignment, color,
start, end, flags);
if (err)
return err;
search_flag = DRM_MM_SEARCH_DEFAULT;
return drm_mm_insert_node_in_range_generic(&vm->mm, node,
size, alignment, color,
start, end,
search_flag, alloc_flag);
}
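/* Illustrative only: a hypothetical caller asking for 2 MiB anywhere in
 * the low 4 GiB of the GTT, preferring the top of that range. The name
 * and the constants are invented for the example.
 */
static int example_insert_low4g(struct i915_address_space *vm,
                                struct drm_mm_node *node)
{
        return i915_gem_gtt_insert(vm, node,
                                   SZ_2M,       /* size */
                                   0,           /* no alignment beyond 4 KiB */
                                   I915_COLOR_UNEVICTABLE,
                                   0, SZ_4G,    /* range restriction */
                                   PIN_HIGH);   /* search top-down */
}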

@@ -40,6 +40,9 @@
#include "i915_gem_timeline.h"
#include "i915_gem_request.h"
#define I915_GTT_PAGE_SIZE 4096UL
#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
@@ -142,34 +145,57 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
struct sg_table;
enum i915_ggtt_view_type {
I915_GGTT_VIEW_NORMAL = 0,
I915_GGTT_VIEW_ROTATED,
I915_GGTT_VIEW_PARTIAL,
};
struct intel_rotation_info {
struct {
struct intel_rotation_plane_info {
/* tiles */
unsigned int width, height, stride, offset;
} plane[2];
} __packed;
static inline void assert_intel_rotation_info_is_packed(void)
{
BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int));
}
struct intel_partial_info {
u64 offset;
unsigned int size;
} __packed;
static inline void assert_intel_partial_info_is_packed(void)
{
BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int));
}
enum i915_ggtt_view_type {
I915_GGTT_VIEW_NORMAL = 0,
I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
};
static inline void assert_i915_ggtt_view_type_is_unique(void)
{
/* As we encode the size of each branch inside the union into its type,
* we have to be careful that each branch has a unique size.
*/
switch ((enum i915_ggtt_view_type)0) {
case I915_GGTT_VIEW_NORMAL:
case I915_GGTT_VIEW_PARTIAL:
case I915_GGTT_VIEW_ROTATED:
/* gcc complains if these are identical cases */
break;
}
}
struct i915_ggtt_view {
enum i915_ggtt_view_type type;
union {
struct {
u64 offset;
unsigned int size;
} partial;
/* Members need to contain no holes/padding */
struct intel_partial_info partial;
struct intel_rotation_info rotated;
} params;
};
};
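/* Sketch (an assumption mirroring the idea, not this patch's exact code):
 * because each view type equals the sizeof of its union member, two views
 * can be compared with a single memcmp over 'type' bytes (zero bytes for
 * the NORMAL view).
 */
static inline bool example_view_equal(const struct i915_ggtt_view *a,
                                      const struct i915_ggtt_view *b)
{
        if (a->type != b->type)
                return false;

        /* union members share an address; 'type' is the byte count */
        return memcmp(&a->partial, &b->partial, a->type) == 0;
}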
extern const struct i915_ggtt_view i915_ggtt_view_normal;
extern const struct i915_ggtt_view i915_ggtt_view_rotated;
enum i915_cache_level;
struct i915_vma;
@@ -333,6 +359,7 @@ struct i915_ggtt {
/** "Graphics Stolen Memory" holds the global PTEs */
void __iomem *gsm;
void (*invalidate)(struct drm_i915_private *dev_priv);
bool do_idle_maps;
@@ -501,6 +528,8 @@ i915_vm_to_ggtt(struct i915_address_space *vm)
int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
void i915_ggtt_enable_guc(struct drm_i915_private *i915);
void i915_ggtt_disable_guc(struct drm_i915_private *i915);
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
@@ -509,6 +538,7 @@ void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *fpriv,
const char *name);
void i915_ppgtt_close(struct i915_address_space *vm);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
if (ppgtt)
@@ -529,6 +559,16 @@ int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages);
int i915_gem_gtt_reserve(struct i915_address_space *vm,
struct drm_mm_node *node,
u64 size, u64 offset, unsigned long color,
unsigned int flags);
int i915_gem_gtt_insert(struct i915_address_space *vm,
struct drm_mm_node *node,
u64 size, u64 alignment, unsigned long color,
u64 start, u64 end, unsigned int flags);
/* Flags used by pin/bind&friends. */
#define PIN_NONBLOCK BIT(0)
#define PIN_MAPPABLE BIT(1)
@@ -543,6 +583,6 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
#define PIN_HIGH BIT(9)
#define PIN_OFFSET_BIAS BIT(10)
#define PIN_OFFSET_FIXED BIT(11)
#define PIN_OFFSET_MASK (~4095)
#define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE)
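/* Note (illustrative): for a power-of-two page size, -I915_GTT_PAGE_SIZE
 * and ~(I915_GTT_PAGE_SIZE - 1) are the same bit pattern, i.e. ~4095, so
 * the mask still strips the low 12 offset bits.
 */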
#endif

@@ -151,10 +151,16 @@ static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
*/
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *i915,
unsigned int size)
phys_addr_t size)
{
struct drm_i915_gem_object *obj;
GEM_BUG_ON(!size);
GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
obj = i915_gem_object_alloc(i915);
if (!obj)
return ERR_PTR(-ENOMEM);

@@ -317,6 +317,29 @@ i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
return obj->tiling_and_stride & STRIDE_MASK;
}
static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
GEM_BUG_ON(!tiling);
return tiling == I915_TILING_Y ? 32 : 8;
}
static inline unsigned int
i915_gem_object_get_tile_height(struct drm_i915_gem_object *obj)
{
return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}
static inline unsigned int
i915_gem_object_get_tile_row_size(struct drm_i915_gem_object *obj)
{
return (i915_gem_object_get_stride(obj) *
i915_gem_object_get_tile_height(obj));
}
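/* Illustrative: an X-tiled object (tile height 8) with a 512 byte stride
 * has a tile row size of 512 * 8 = 4096 bytes, i.e. exactly one GTT page.
 */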
int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
unsigned int tiling, unsigned int stride);
static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{

@@ -187,20 +187,20 @@ int i915_gem_render_state_init(struct intel_engine_cs *engine)
if (!rodata)
return 0;
if (rodata->batch_items * 4 > 4096)
if (rodata->batch_items * 4 > PAGE_SIZE)
return -EINVAL;
so = kmalloc(sizeof(*so), GFP_KERNEL);
if (!so)
return -ENOMEM;
obj = i915_gem_object_create_internal(engine->i915, 4096);
obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto err_free;
}
so->vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
so->vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
if (IS_ERR(so->vma)) {
ret = PTR_ERR(so->vma);
goto err_obj;

@@ -307,26 +307,6 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
} while (tmp != req);
}
static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
{
struct i915_gpu_error *error = &dev_priv->gpu_error;
if (i915_terminally_wedged(error))
return -EIO;
if (i915_reset_in_progress(error)) {
/* Non-interruptible callers can't handle -EAGAIN, hence return
* -EIO unconditionally for these.
*/
if (!dev_priv->mm.interruptible)
return -EIO;
return -EAGAIN;
}
return 0;
}
static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
@@ -521,12 +501,10 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
lockdep_assert_held(&dev_priv->drm.struct_mutex);
/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
* EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
* and restart.
* EIO if the GPU is already wedged.
*/
ret = i915_gem_check_wedge(dev_priv);
if (ret)
return ERR_PTR(ret);
if (i915_terminally_wedged(&dev_priv->gpu_error))
return ERR_PTR(-EIO);
/* Pinning the contexts may generate requests in order to acquire
* GGTT space, so do this first before we reserve a seqno for
@@ -851,6 +829,13 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
lockdep_assert_held(&request->i915->drm.struct_mutex);
trace_i915_gem_request_add(request);
/* Make sure that no request gazumped us - if it was allocated after
* our i915_gem_request_alloc() and called __i915_add_request() before
* us, the timeline will hold its seqno which is later than ours.
*/
GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
request->fence.seqno));
/*
* To ensure that this call will not fail, space for its emissions
* should already have been reserved in the ring buffer. Let the ring

@@ -647,8 +647,9 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
stolen_offset, gtt_offset, size);
/* KISS and expect everything to be page-aligned */
if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
WARN_ON(stolen_offset & 4095))
if (WARN_ON(size == 0) ||
WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
return NULL;
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
@@ -682,7 +683,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
if (ret)
goto err;
vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL);
vma = i915_vma_instance(obj, &ggtt->base, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_pages;
@@ -693,15 +694,16 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
* setting up the GTT space. The actual reservation will occur
* later.
*/
vma->node.start = gtt_offset;
vma->node.size = size;
ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node,
size, gtt_offset, obj->cache_level,
0);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
goto err_pages;
}
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
vma->pages = obj->mm.pages;
vma->flags |= I915_VMA_GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);

@@ -34,8 +34,8 @@
/**
* DOC: buffer object tiling
*
* i915_gem_set_tiling() and i915_gem_get_tiling() is the userspace interface to
* declare fence register requirements.
* i915_gem_set_tiling_ioctl() and i915_gem_get_tiling_ioctl() is the userspace
* interface to declare fence register requirements.
*
* In principle GEM doesn't care at all about the internal data layout of an
* object, and hence it also doesn't care about tiling or swizzling. There's two
@@ -58,86 +58,147 @@
* involvement.
*/
/**
* i915_gem_fence_size - required global GTT size for a fence
* @i915: i915 device
* @size: object size
* @tiling: tiling mode
* @stride: tiling stride
*
* Return the required global GTT size for a fence (view of a tiled object),
* taking into account potential fence register mapping.
*/
u32 i915_gem_fence_size(struct drm_i915_private *i915,
u32 size, unsigned int tiling, unsigned int stride)
{
u32 ggtt_size;
GEM_BUG_ON(!size);
if (tiling == I915_TILING_NONE)
return size;
GEM_BUG_ON(!stride);
if (INTEL_GEN(i915) >= 4) {
stride *= i915_gem_tile_height(tiling);
GEM_BUG_ON(!IS_ALIGNED(stride, I965_FENCE_PAGE));
return roundup(size, stride);
}
/* Previous chips need a power-of-two fence region when tiling */
if (IS_GEN3(i915))
ggtt_size = 1024*1024;
else
ggtt_size = 512*1024;
while (ggtt_size < size)
ggtt_size <<= 1;
return ggtt_size;
}
/**
* i915_gem_fence_alignment - required global GTT alignment for a fence
* @i915: i915 device
* @size: object size
* @tiling: tiling mode
* @stride: tiling stride
*
* Return the required global GTT alignment for a fence (a view of a tiled
* object), taking into account potential fence register mapping.
*/
u32 i915_gem_fence_alignment(struct drm_i915_private *i915, u32 size,
unsigned int tiling, unsigned int stride)
{
GEM_BUG_ON(!size);
/*
* Minimum alignment is 4k (GTT page size), but might be greater
* if a fence register is needed for the object.
*/
if (tiling == I915_TILING_NONE)
return I915_GTT_MIN_ALIGNMENT;
if (INTEL_GEN(i915) >= 4)
return I965_FENCE_PAGE;
/*
* Previous chips need to be aligned to the size of the smallest
* fence register that can contain the object.
*/
return i915_gem_fence_size(i915, size, tiling, stride);
}
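/* Worked example (illustrative): on gen4+ an X-tiled object of 700 KiB
 * with a 4 KiB stride rounds up to whole tile rows:
 *   fence stride = 4096 * 8 = 32 KiB
 *   i915_gem_fence_size() = roundup(716800, 32768) = 720896 (22 rows)
 * and i915_gem_fence_alignment() is I965_FENCE_PAGE. On gen3 the same
 * object instead needs a power-of-two region, here the 1 MiB minimum.
 */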
/* Check pitch constraints for all chips & tiling formats */
static bool
i915_tiling_ok(struct drm_i915_private *dev_priv,
int stride, int size, int tiling_mode)
i915_tiling_ok(struct drm_i915_gem_object *obj,
unsigned int tiling, unsigned int stride)
{
int tile_width;
struct drm_i915_private *i915 = to_i915(obj->base.dev);
unsigned int tile_width;
/* Linear is always fine */
if (tiling_mode == I915_TILING_NONE)
if (tiling == I915_TILING_NONE)
return true;
if (tiling_mode > I915_TILING_LAST)
if (tiling > I915_TILING_LAST)
return false;
if (IS_GEN2(dev_priv) ||
(tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev_priv)))
tile_width = 128;
else
tile_width = 512;
/* check maximum stride & object size */
/* i965+ stores the end address of the gtt mapping in the fence
* reg, so don't bother to check the size */
if (INTEL_GEN(dev_priv) >= 7) {
if (INTEL_GEN(i915) >= 7) {
if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
return false;
} else if (INTEL_GEN(dev_priv) >= 4) {
} else if (INTEL_GEN(i915) >= 4) {
if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
return false;
} else {
if (stride > 8192)
return false;
if (IS_GEN3(dev_priv)) {
if (size > I830_FENCE_MAX_SIZE_VAL << 20)
if (IS_GEN3(i915)) {
if (obj->base.size > I830_FENCE_MAX_SIZE_VAL << 20)
return false;
} else {
if (size > I830_FENCE_MAX_SIZE_VAL << 19)
if (obj->base.size > I830_FENCE_MAX_SIZE_VAL << 19)
return false;
}
}
if (stride < tile_width)
if (IS_GEN2(i915) ||
(tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915)))
tile_width = 128;
else
tile_width = 512;
if (!IS_ALIGNED(stride, tile_width))
return false;
/* 965+ just needs multiples of tile width */
if (INTEL_GEN(dev_priv) >= 4) {
if (stride & (tile_width - 1))
return false;
if (INTEL_GEN(i915) >= 4)
return true;
}
/* Pre-965 needs power of two tile widths */
if (stride & (stride - 1))
return false;
return true;
return is_power_of_2(stride);
}
static bool i915_vma_fence_prepare(struct i915_vma *vma, int tiling_mode)
static bool i915_vma_fence_prepare(struct i915_vma *vma,
int tiling_mode, unsigned int stride)
{
struct drm_i915_private *dev_priv = vma->vm->i915;
u32 size;
struct drm_i915_private *i915 = vma->vm->i915;
u32 size, alignment;
if (!i915_vma_is_map_and_fenceable(vma))
return true;
if (INTEL_GEN(dev_priv) == 3) {
if (vma->node.start & ~I915_FENCE_START_MASK)
return false;
} else {
if (vma->node.start & ~I830_FENCE_START_MASK)
return false;
}
size = i915_gem_get_ggtt_size(dev_priv, vma->size, tiling_mode);
size = i915_gem_fence_size(i915, vma->size, tiling_mode, stride);
if (vma->node.size < size)
return false;
if (vma->node.start & (size - 1))
alignment = i915_gem_fence_alignment(i915, vma->size, tiling_mode, stride);
if (!IS_ALIGNED(vma->node.start, alignment))
return false;
return true;
@@ -145,20 +206,20 @@ static bool i915_vma_fence_prepare(struct i915_vma *vma, int tiling_mode)
/* Make the current GTT allocation valid for the change in tiling. */
static int
i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, int tiling_mode)
i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
int tiling_mode, unsigned int stride)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_vma *vma;
int ret;
if (tiling_mode == I915_TILING_NONE)
return 0;
if (INTEL_GEN(dev_priv) >= 4)
return 0;
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (i915_vma_fence_prepare(vma, tiling_mode))
if (!i915_vma_is_ggtt(vma))
break;
if (i915_vma_fence_prepare(vma, tiling_mode, stride))
continue;
ret = i915_vma_unbind(vma);
@@ -169,8 +230,100 @@ i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, int tiling_mode)
return 0;
}
int
i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
unsigned int tiling, unsigned int stride)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_vma *vma;
int err;
/* Make sure we don't cross-contaminate obj->tiling_and_stride */
BUILD_BUG_ON(I915_TILING_LAST & STRIDE_MASK);
GEM_BUG_ON(!i915_tiling_ok(obj, tiling, stride));
GEM_BUG_ON(!stride ^ (tiling == I915_TILING_NONE));
lockdep_assert_held(&i915->drm.struct_mutex);
if ((tiling | stride) == obj->tiling_and_stride)
return 0;
if (obj->framebuffer_references)
return -EBUSY;
/* We need to rebind the object if its current allocation
* no longer meets the alignment restrictions for its new
* tiling mode. Otherwise we can just leave it alone, but
* need to ensure that any fence register is updated before
* the next fenced (either through the GTT or by the BLT unit
* on older GPUs) access.
*
* After updating the tiling parameters, we then flag whether
* we need to update an associated fence register. Note this
* has to also include the unfenced register the GPU uses
* whilst executing a fenced command for an untiled object.
*/
err = i915_gem_object_fence_prepare(obj, tiling, stride);
if (err)
return err;
/* If the memory has unknown (i.e. varying) swizzling, we pin the
* pages to prevent them being swapped out and causing corruption
* due to the change in swizzling.
*/
mutex_lock(&obj->mm.lock);
if (obj->mm.pages &&
obj->mm.madv == I915_MADV_WILLNEED &&
i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (tiling == I915_TILING_NONE) {
GEM_BUG_ON(!obj->mm.quirked);
__i915_gem_object_unpin_pages(obj);
obj->mm.quirked = false;
}
if (!i915_gem_object_is_tiled(obj)) {
GEM_BUG_ON(!obj->mm.quirked);
__i915_gem_object_pin_pages(obj);
obj->mm.quirked = true;
}
}
mutex_unlock(&obj->mm.lock);
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!i915_vma_is_ggtt(vma))
break;
vma->fence_size =
i915_gem_fence_size(i915, vma->size, tiling, stride);
vma->fence_alignment =
i915_gem_fence_alignment(i915,
vma->size, tiling, stride);
if (vma->fence)
vma->fence->dirty = true;
}
obj->tiling_and_stride = tiling | stride;
/* Force the fence to be reacquired for GTT access */
i915_gem_release_mmap(obj);
/* Try to preallocate memory required to save swizzling on put-pages */
if (i915_gem_object_needs_bit17_swizzle(obj)) {
if (!obj->bit_17) {
obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
sizeof(long), GFP_KERNEL);
}
} else {
kfree(obj->bit_17);
obj->bit_17 = NULL;
}
return 0;
}
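/* Illustrative caller (names invented for the example): retile an object
 * as X-tiled with a 512 byte stride; must run under struct_mutex, as the
 * ioctl path below does.
 */
static int example_set_x_tiling(struct drm_i915_gem_object *obj)
{
        return i915_gem_object_set_tiling(obj, I915_TILING_X, 512);
}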
/**
* i915_gem_set_tiling - IOCTL handler to set tiling mode
* i915_gem_set_tiling_ioctl - IOCTL handler to set tiling mode
* @dev: DRM device
* @data: data pointer for the ioctl
* @file: DRM file for the ioctl call
@@ -184,30 +337,19 @@ i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, int tiling_mode)
* Zero on success, negative errno on failure.
*/
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_file *file)
i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_set_tiling *args = data;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
int err = 0;
/* Make sure we don't cross-contaminate obj->tiling_and_stride */
BUILD_BUG_ON(I915_TILING_LAST & STRIDE_MASK);
int err;
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
if (!i915_tiling_ok(dev_priv,
args->stride, obj->base.size, args->tiling_mode)) {
i915_gem_object_put(obj);
return -EINVAL;
}
mutex_lock(&dev->struct_mutex);
if (obj->pin_display || obj->framebuffer_references) {
err = -EBUSY;
if (!i915_tiling_ok(obj, args->tiling_mode, args->stride)) {
err = -EINVAL;
goto err;
}
@@ -216,9 +358,9 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
args->stride = 0;
} else {
if (args->tiling_mode == I915_TILING_X)
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
args->swizzle_mode = to_i915(dev)->mm.bit_6_swizzle_x;
else
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
args->swizzle_mode = to_i915(dev)->mm.bit_6_swizzle_y;
/* Hide bit 17 swizzling from the user. This prevents old Mesa
* from aborting the application on sw fallbacks to bit 17,
@@ -240,79 +382,24 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
}
if (args->tiling_mode != i915_gem_object_get_tiling(obj) ||
args->stride != i915_gem_object_get_stride(obj)) {
/* We need to rebind the object if its current allocation
* no longer meets the alignment restrictions for its new
* tiling mode. Otherwise we can just leave it alone, but
* need to ensure that any fence register is updated before
* the next fenced (either through the GTT or by the BLT unit
* on older GPUs) access.
*
* After updating the tiling parameters, we then flag whether
* we need to update an associated fence register. Note this
* has to also include the unfenced register the GPU uses
* whilst executing a fenced command for an untiled object.
*/
err = mutex_lock_interruptible(&dev->struct_mutex);
if (err)
goto err;
err = i915_gem_object_fence_prepare(obj, args->tiling_mode);
if (!err) {
struct i915_vma *vma;
err = i915_gem_object_set_tiling(obj, args->tiling_mode, args->stride);
mutex_unlock(&dev->struct_mutex);
mutex_lock(&obj->mm.lock);
if (obj->mm.pages &&
obj->mm.madv == I915_MADV_WILLNEED &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (args->tiling_mode == I915_TILING_NONE) {
GEM_BUG_ON(!obj->mm.quirked);
__i915_gem_object_unpin_pages(obj);
obj->mm.quirked = false;
}
if (!i915_gem_object_is_tiled(obj)) {
GEM_BUG_ON(!obj->mm.quirked);
__i915_gem_object_pin_pages(obj);
obj->mm.quirked = true;
}
}
mutex_unlock(&obj->mm.lock);
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!vma->fence)
continue;
vma->fence->dirty = true;
}
obj->tiling_and_stride =
args->stride | args->tiling_mode;
/* Force the fence to be reacquired for GTT access */
i915_gem_release_mmap(obj);
}
}
/* we have to maintain this existing ABI... */
/* We have to maintain this existing ABI... */
args->stride = i915_gem_object_get_stride(obj);
args->tiling_mode = i915_gem_object_get_tiling(obj);
/* Try to preallocate memory required to save swizzling on put-pages */
if (i915_gem_object_needs_bit17_swizzle(obj)) {
if (obj->bit_17 == NULL) {
obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
sizeof(long), GFP_KERNEL);
}
} else {
kfree(obj->bit_17);
obj->bit_17 = NULL;
}
err:
i915_gem_object_put(obj);
mutex_unlock(&dev->struct_mutex);
return err;
}
/**
* i915_gem_get_tiling - IOCTL handler to get tiling mode
* i915_gem_get_tiling_ioctl - IOCTL handler to get tiling mode
* @dev: DRM device
* @data: data pointer for the ioctl
* @file: DRM file for the ioctl call
@@ -325,8 +412,8 @@ err:
* Zero on success, negative errno on failure.
*/
int
i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file)
i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_get_tiling *args = data;
struct drm_i915_private *dev_priv = to_i915(dev);

@@ -121,6 +121,7 @@ static void __i915_error_advance(struct drm_i915_error_state_buf *e,
e->pos += len;
}
__printf(2, 0)
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
const char *f, va_list args)
{

@@ -61,12 +61,18 @@
#define DMA_ADDRESS_SPACE_GTT (8 << 16)
#define DMA_COPY_SIZE _MMIO(0xc310)
#define DMA_CTRL _MMIO(0xc314)
#define HUC_UKERNEL (1<<9)
#define UOS_MOVE (1<<4)
#define START_DMA (1<<0)
#define DMA_GUC_WOPCM_OFFSET _MMIO(0xc340)
#define HUC_LOADING_AGENT_VCR (0<<1)
#define HUC_LOADING_AGENT_GUC (1<<1)
#define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */
#define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4)
#define HUC_STATUS2 _MMIO(0xD3B0)
#define HUC_FW_VERIFIED (1<<7)
/* Defines WOPCM space available to GuC firmware */
#define GUC_WOPCM_SIZE _MMIO(0xc050)
/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
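/* Illustrative check (an assumption, mirroring how such status bits are
 * typically polled): HuC authentication completed successfully when
 * HUC_FW_VERIFIED is set in HUC_STATUS2.
 */
static bool example_huc_verified(struct drm_i915_private *dev_priv)
{
        return I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
}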

@@ -22,8 +22,6 @@
*
*/
#include <linux/circ_buf.h>
#include <linux/debugfs.h>
#include <linux/relay.h>
#include "i915_drv.h"
#include "intel_uc.h"
@@ -545,7 +543,7 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
*/
/**
* guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
* intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
* @guc: the guc
* @size: size of area to allocate (both virtual space and memory)
*
@@ -557,7 +555,7 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
*
* Return: A i915_vma if successful, otherwise an ERR_PTR.
*/
static struct i915_vma *guc_allocate_vma(struct intel_guc *guc, u32 size)
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct drm_i915_gem_object *obj;
@@ -568,7 +566,7 @@ static struct i915_vma *guc_allocate_vma(struct intel_guc *guc, u32 size)
if (IS_ERR(obj))
return ERR_CAST(obj);
vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
if (IS_ERR(vma))
goto err;
@@ -579,9 +577,6 @@ static struct i915_vma *guc_allocate_vma(struct intel_guc *guc, u32 size)
goto err;
}
/* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
return vma;
err:
@@ -721,7 +716,7 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
}
/* The first page is doorbell/proc_desc. Two followed pages are wq. */
vma = guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
if (IS_ERR(vma))
goto err;
@@ -777,488 +772,7 @@ err:
return NULL;
}
/*
* Sub buffer switch callback. Called whenever relay has to switch to a new
* sub buffer, relay stays on the same sub buffer if 0 is returned.
*/
static int subbuf_start_callback(struct rchan_buf *buf,
void *subbuf,
void *prev_subbuf,
size_t prev_padding)
{
/* Use no-overwrite mode by default, where relay will stop accepting
* new data if there are no empty sub buffers left.
* There is no strict synchronization enforced by relay between Consumer
* and Producer. In overwrite mode, there is a possibility of getting
* inconsistent/garbled data, the producer could be writing on to the
* same sub buffer from which Consumer is reading. This can't be avoided
* unless Consumer is fast enough and can always run in tandem with
* Producer.
*/
if (relay_buf_full(buf))
return 0;
return 1;
}
/*
* file_create() callback. Creates relay file in debugfs.
*/
static struct dentry *create_buf_file_callback(const char *filename,
struct dentry *parent,
umode_t mode,
struct rchan_buf *buf,
int *is_global)
{
struct dentry *buf_file;
/* This is to enable the use of a single buffer for the relay channel and
* correspondingly have a single file exposed to User, through which
* it can collect the logs in order without any post-processing.
* Need to set 'is_global' even if parent is NULL for early logging.
*/
*is_global = 1;
if (!parent)
return NULL;
/* Not using the channel filename passed as an argument, since for each
* channel relay appends the corresponding CPU number to the filename
* passed in relay_open(). This should be fine as relay just needs a
* dentry of the file associated with the channel buffer and that file's
* name need not be same as the filename passed as an argument.
*/
buf_file = debugfs_create_file("guc_log", mode,
parent, buf, &relay_file_operations);
return buf_file;
}
/*
* file_remove() default callback. Removes relay file in debugfs.
*/
static int remove_buf_file_callback(struct dentry *dentry)
{
debugfs_remove(dentry);
return 0;
}
/* relay channel callbacks */
static struct rchan_callbacks relay_callbacks = {
.subbuf_start = subbuf_start_callback,
.create_buf_file = create_buf_file_callback,
.remove_buf_file = remove_buf_file_callback,
};
static void guc_log_remove_relay_file(struct intel_guc *guc)
{
relay_close(guc->log.relay_chan);
}
static int guc_log_create_relay_channel(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct rchan *guc_log_relay_chan;
size_t n_subbufs, subbuf_size;
/* Keep the size of sub buffers same as shared log buffer */
subbuf_size = guc->log.vma->obj->base.size;
/* Store up to 8 snapshots, which is large enough to buffer sufficient
* boot time logs and provides enough leeway to User, in terms of
* latency, for consuming the logs from relay. Also doesn't take
* up too much memory.
*/
n_subbufs = 8;
guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
n_subbufs, &relay_callbacks, dev_priv);
if (!guc_log_relay_chan) {
DRM_ERROR("Couldn't create relay chan for GuC logging\n");
return -ENOMEM;
}
GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
guc->log.relay_chan = guc_log_relay_chan;
return 0;
}
static int guc_log_create_relay_file(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct dentry *log_dir;
int ret;
/* For now create the log file in /sys/kernel/debug/dri/0 dir */
log_dir = dev_priv->drm.primary->debugfs_root;
* If the /sys/kernel/debug/dri/0 location does not exist, then debugfs is
* not mounted and so can't create the relay file.
* The relay API seems to fit well with debugfs only; to use relay there
* are 3 requirements, which can be met cleanly only with a debugfs file:
* i) Need the associated dentry pointer of the file, while opening the
* relay channel.
* ii) Should be able to use 'relay_file_operations' fops for the file.
* iii) Set the 'i_private' field of file's inode to the pointer of
* relay channel buffer.
*/
if (!log_dir) {
DRM_ERROR("Debugfs dir not available yet for GuC log file\n");
return -ENODEV;
}
ret = relay_late_setup_files(guc->log.relay_chan, "guc_log", log_dir);
if (ret) {
DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
return ret;
}
return 0;
}
static void guc_move_to_next_buf(struct intel_guc *guc)
{
/* Make sure the updates made in the sub buffer are visible when
* Consumer sees the following update to offset inside the sub buffer.
*/
smp_wmb();
/* All data has been written, so now move the offset of sub buffer. */
relay_reserve(guc->log.relay_chan, guc->log.vma->obj->base.size);
/* Switch to the next sub buffer */
relay_flush(guc->log.relay_chan);
}
static void *guc_get_write_buffer(struct intel_guc *guc)
{
if (!guc->log.relay_chan)
return NULL;
/* Just get the base address of a new sub buffer and copy data into it
* ourselves. NULL will be returned in no-overwrite mode, if all sub
* buffers are full. Could have used the relay_write() to indirectly
* copy the data, but that would have been bit convoluted, as we need to
* write to only certain locations inside a sub buffer which cannot be
* done without using relay_reserve() along with relay_write(). So its
* better to use relay_reserve() alone.
*/
return relay_reserve(guc->log.relay_chan, 0);
}
static bool
guc_check_log_buf_overflow(struct intel_guc *guc,
enum guc_log_buffer_type type, unsigned int full_cnt)
{
unsigned int prev_full_cnt = guc->log.prev_overflow_count[type];
bool overflow = false;
if (full_cnt != prev_full_cnt) {
overflow = true;
guc->log.prev_overflow_count[type] = full_cnt;
guc->log.total_overflow_count[type] += full_cnt - prev_full_cnt;
if (full_cnt < prev_full_cnt) {
/* buffer_full_cnt is a 4 bit counter */
guc->log.total_overflow_count[type] += 16;
}
DRM_ERROR_RATELIMITED("GuC log buffer overflow\n");
}
return overflow;
}
static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
{
switch (type) {
case GUC_ISR_LOG_BUFFER:
return (GUC_LOG_ISR_PAGES + 1) * PAGE_SIZE;
case GUC_DPC_LOG_BUFFER:
return (GUC_LOG_DPC_PAGES + 1) * PAGE_SIZE;
case GUC_CRASH_DUMP_LOG_BUFFER:
return (GUC_LOG_CRASH_PAGES + 1) * PAGE_SIZE;
default:
MISSING_CASE(type);
}
return 0;
}
static void guc_read_update_log_buffer(struct intel_guc *guc)
{
unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
struct guc_log_buffer_state log_buf_state_local;
enum guc_log_buffer_type type;
void *src_data, *dst_data;
bool new_overflow;
if (WARN_ON(!guc->log.buf_addr))
return;
/* Get the pointer to shared GuC log buffer */
log_buf_state = src_data = guc->log.buf_addr;
/* Get the pointer to local buffer to store the logs */
log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);
/* Actual logs are present from the 2nd page */
src_data += PAGE_SIZE;
dst_data += PAGE_SIZE;
for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
/* Make a copy of the state structure, inside GuC log buffer
* (which is uncached mapped), on the stack to avoid reading
* from it multiple times.
*/
memcpy(&log_buf_state_local, log_buf_state,
sizeof(struct guc_log_buffer_state));
buffer_size = guc_get_log_buffer_size(type);
read_offset = log_buf_state_local.read_ptr;
write_offset = log_buf_state_local.sampled_write_ptr;
full_cnt = log_buf_state_local.buffer_full_cnt;
/* Bookkeeping stuff */
guc->log.flush_count[type] += log_buf_state_local.flush_to_file;
new_overflow = guc_check_log_buf_overflow(guc, type, full_cnt);
/* Update the state of shared log buffer */
log_buf_state->read_ptr = write_offset;
log_buf_state->flush_to_file = 0;
log_buf_state++;
if (unlikely(!log_buf_snapshot_state))
continue;
/* First copy the state structure in snapshot buffer */
memcpy(log_buf_snapshot_state, &log_buf_state_local,
sizeof(struct guc_log_buffer_state));
/* The write pointer could have been updated by GuC firmware,
* after sending the flush interrupt to Host, for consistency
* set write pointer value to same value of sampled_write_ptr
* in the snapshot buffer.
*/
log_buf_snapshot_state->write_ptr = write_offset;
log_buf_snapshot_state++;
/* Now copy the actual logs. */
if (unlikely(new_overflow)) {
/* copy the whole buffer in case of overflow */
read_offset = 0;
write_offset = buffer_size;
} else if (unlikely((read_offset > buffer_size) ||
(write_offset > buffer_size))) {
DRM_ERROR("invalid log buffer state\n");
/* copy whole buffer as offsets are unreliable */
read_offset = 0;
write_offset = buffer_size;
}
/* Just copy the newly written data */
if (read_offset > write_offset) {
i915_memcpy_from_wc(dst_data, src_data, write_offset);
bytes_to_copy = buffer_size - read_offset;
} else {
bytes_to_copy = write_offset - read_offset;
}
i915_memcpy_from_wc(dst_data + read_offset,
src_data + read_offset, bytes_to_copy);
src_data += buffer_size;
dst_data += buffer_size;
}
if (log_buf_snapshot_state)
guc_move_to_next_buf(guc);
else {
/* Use rate limiting to avoid a deluge of messages, logs might be
* getting consumed by User at a slow rate.
*/
DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
guc->log.capture_miss_count++;
}
}
static void guc_capture_logs_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private, guc.log.flush_work);
i915_guc_capture_logs(dev_priv);
}
static void guc_log_cleanup(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
lockdep_assert_held(&dev_priv->drm.struct_mutex);
/* First disable the flush interrupt */
gen9_disable_guc_interrupts(dev_priv);
if (guc->log.flush_wq)
destroy_workqueue(guc->log.flush_wq);
guc->log.flush_wq = NULL;
if (guc->log.relay_chan)
guc_log_remove_relay_file(guc);
guc->log.relay_chan = NULL;
if (guc->log.buf_addr)
i915_gem_object_unpin_map(guc->log.vma->obj);
guc->log.buf_addr = NULL;
}
static int guc_log_create_extras(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
void *vaddr;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
/* Nothing to do */
if (i915.guc_log_level < 0)
return 0;
if (!guc->log.buf_addr) {
/* Create a WC (Uncached for read) vmalloc mapping of log
* buffer pages, so that we can directly get the data
* (up-to-date) from memory.
*/
vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
return ret;
}
guc->log.buf_addr = vaddr;
}
if (!guc->log.relay_chan) {
/* Create a relay channel, so that we have buffers for storing
* the GuC firmware logs, the channel will be linked with a file
* later on when debugfs is registered.
*/
ret = guc_log_create_relay_channel(guc);
if (ret)
return ret;
}
if (!guc->log.flush_wq) {
INIT_WORK(&guc->log.flush_work, guc_capture_logs_work);
/*
* GuC log buffer flush work item has to do register access to
* send the ack to GuC and this work item, if not synced before
* suspend, can potentially get executed after the GFX device is
* suspended.
* By marking the WQ as freezable, we don't have to bother about
* flushing of this work item from the suspend hooks, the pending
* work item if any will be either executed before the suspend
* or scheduled later on resume. This way the handling of work
* item can be kept same between system suspend & rpm suspend.
*/
guc->log.flush_wq = alloc_ordered_workqueue("i915-guc_log",
WQ_HIGHPRI | WQ_FREEZABLE);
if (guc->log.flush_wq == NULL) {
DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
return -ENOMEM;
}
}
return 0;
}
static void guc_log_create(struct intel_guc *guc)
{
struct i915_vma *vma;
unsigned long offset;
uint32_t size, flags;
if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
/* The first page is to save log buffer state. Allocate one
* extra page for others in case of overlap */
size = (1 + GUC_LOG_DPC_PAGES + 1 +
GUC_LOG_ISR_PAGES + 1 +
GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
vma = guc->log.vma;
if (!vma) {
/* We require SSE 4.1 for fast reads from the GuC log buffer and
* it should be present on the chipsets supporting GuC based
* submissions.
*/
if (WARN_ON(!i915_has_memcpy_from_wc())) {
/* logging will not be enabled */
i915.guc_log_level = -1;
return;
}
vma = guc_allocate_vma(guc, size);
if (IS_ERR(vma)) {
/* logging will be off */
i915.guc_log_level = -1;
return;
}
guc->log.vma = vma;
if (guc_log_create_extras(guc)) {
guc_log_cleanup(guc);
i915_vma_unpin_and_release(&guc->log.vma);
i915.guc_log_level = -1;
return;
}
}
/* each allocated unit is a page */
flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
(GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
(GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
}
static int guc_log_late_setup(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
if (i915.guc_log_level < 0)
return -EINVAL;
/* If log_level was set as -1 at boot time, then setup needed to
* handle log buffer flush interrupts would not have been done yet,
* so do that now.
*/
ret = guc_log_create_extras(guc);
if (ret)
goto err;
ret = guc_log_create_relay_file(guc);
if (ret)
goto err;
return 0;
err:
guc_log_cleanup(guc);
/* logging will remain off */
i915.guc_log_level = -1;
return ret;
}
static void guc_policies_init(struct guc_policies *policies)
{
@@ -1301,7 +815,7 @@ static void guc_addon_create(struct intel_guc *guc)
vma = guc->ads_vma;
if (!vma) {
vma = guc_allocate_vma(guc, PAGE_ALIGN(size));
vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(size));
if (IS_ERR(vma))
return;
@@ -1376,13 +890,13 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
if (guc->ctx_pool_vma)
return 0; /* already allocated */
vma = guc_allocate_vma(guc, gemsize);
vma = intel_guc_allocate_vma(guc, gemsize);
if (IS_ERR(vma))
return PTR_ERR(vma);
guc->ctx_pool_vma = vma;
ida_init(&guc->ctx_ids);
guc_log_create(guc);
intel_guc_log_create(guc);
guc_addon_create(guc);
guc->execbuf_client = guc_client_alloc(dev_priv,
@@ -1484,7 +998,7 @@ int intel_guc_suspend(struct drm_i915_private *dev_priv)
struct i915_gem_context *ctx;
u32 data[3];
if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
return 0;
gen9_disable_guc_interrupts(dev_priv);
@@ -1511,7 +1025,7 @@ int intel_guc_resume(struct drm_i915_private *dev_priv)
struct i915_gem_context *ctx;
u32 data[3];
if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
return 0;
if (i915.guc_log_level >= 0)
@@ -1527,103 +1041,4 @@ int intel_guc_resume(struct drm_i915_private *dev_priv)
return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
void i915_guc_capture_logs(struct drm_i915_private *dev_priv)
{
guc_read_update_log_buffer(&dev_priv->guc);
/* Generally the device is expected to be active only at this
* time, so get/put should be really quick.
*/
intel_runtime_pm_get(dev_priv);
intel_guc_log_flush_complete(&dev_priv->guc);
intel_runtime_pm_put(dev_priv);
}
void i915_guc_flush_logs(struct drm_i915_private *dev_priv)
{
if (!i915.enable_guc_submission || (i915.guc_log_level < 0))
return;
/* First disable the interrupts, they will be re-enabled afterwards */
gen9_disable_guc_interrupts(dev_priv);
/* Before initiating the forceful flush, wait for any pending/ongoing
* flush to complete otherwise forceful flush may not actually happen.
*/
flush_work(&dev_priv->guc.log.flush_work);
/* Ask GuC to update the log buffer state */
intel_guc_log_flush(&dev_priv->guc);
/* GuC would have updated log buffer by now, so capture it */
i915_guc_capture_logs(dev_priv);
}
void i915_guc_unregister(struct drm_i915_private *dev_priv)
{
if (!i915.enable_guc_submission)
return;
mutex_lock(&dev_priv->drm.struct_mutex);
guc_log_cleanup(&dev_priv->guc);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
void i915_guc_register(struct drm_i915_private *dev_priv)
{
if (!i915.enable_guc_submission)
return;
mutex_lock(&dev_priv->drm.struct_mutex);
guc_log_late_setup(&dev_priv->guc);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
{
union guc_log_control log_param;
int ret;
log_param.value = control_val;
if (log_param.verbosity < GUC_LOG_VERBOSITY_MIN ||
log_param.verbosity > GUC_LOG_VERBOSITY_MAX)
return -EINVAL;
/* This combination doesn't make sense & won't have any effect */
if (!log_param.logging_enabled && (i915.guc_log_level < 0))
return 0;
ret = intel_guc_log_control(&dev_priv->guc, log_param.value);
if (ret < 0) {
DRM_DEBUG_DRIVER("guc_logging_control action failed %d\n", ret);
return ret;
}
i915.guc_log_level = log_param.verbosity;
/* If log_level was set as -1 at boot time, then the relay channel file
* wouldn't have been created by now and interrupts also would not have
* been enabled.
*/
if (!dev_priv->guc.log.relay_chan) {
ret = guc_log_late_setup(&dev_priv->guc);
if (!ret)
gen9_enable_guc_interrupts(dev_priv);
} else if (!log_param.logging_enabled) {
/* Once logging is disabled, GuC won't generate logs & send an
* interrupt. But there could be some data in the log buffer
* which is yet to be captured. So request GuC to update the log
* buffer state and then collect the left over logs.
*/
i915_guc_flush_logs(dev_priv);
/* As logging is disabled, update log level to reflect that */
i915.guc_log_level = -1;
} else {
/* In case interrupts were disabled, enable them now */
gen9_enable_guc_interrupts(dev_priv);
}
return ret;
}

@@ -1170,6 +1170,9 @@ static void gen6_pm_rps_work(struct work_struct *work)
adj *= 2;
else /* CHV needs even encode values */
adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
if (new_delay >= dev_priv->rps.max_freq_softlimit)
adj = 0;
/*
* For better performance, jump directly
* to RPe if we're below it.
@@ -1191,6 +1194,9 @@ static void gen6_pm_rps_work(struct work_struct *work)
adj *= 2;
else /* CHV needs even encode values */
adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
if (new_delay <= dev_priv->rps.min_freq_softlimit)
adj = 0;
} else { /* unknown event */
adj = 0;
}
@@ -1553,41 +1559,68 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
{
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_pipe_crc_entry *entry;
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
struct drm_driver *driver = dev_priv->drm.driver;
uint32_t crcs[5];
int head, tail;
spin_lock(&pipe_crc->lock);
if (pipe_crc->source) {
if (!pipe_crc->entries) {
spin_unlock(&pipe_crc->lock);
DRM_DEBUG_KMS("spurious interrupt\n");
return;
}
head = pipe_crc->head;
tail = pipe_crc->tail;
if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
spin_unlock(&pipe_crc->lock);
DRM_ERROR("CRC buffer overflowing\n");
return;
}
entry = &pipe_crc->entries[head];
entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe);
entry->crc[0] = crc0;
entry->crc[1] = crc1;
entry->crc[2] = crc2;
entry->crc[3] = crc3;
entry->crc[4] = crc4;
head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
pipe_crc->head = head;
if (!pipe_crc->entries) {
spin_unlock(&pipe_crc->lock);
DRM_DEBUG_KMS("spurious interrupt\n");
return;
}
head = pipe_crc->head;
tail = pipe_crc->tail;
if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
wake_up_interruptible(&pipe_crc->wq);
} else {
/*
* For some not yet identified reason, the first CRC is
* bonkers. So let's just wait for the next vblank and read
* out the buggy result.
*
* On CHV sometimes the second CRC is bonkers as well, so
* don't trust that one either.
*/
if (pipe_crc->skipped == 0 ||
(IS_CHERRYVIEW(dev_priv) && pipe_crc->skipped == 1)) {
pipe_crc->skipped++;
spin_unlock(&pipe_crc->lock);
return;
}
spin_unlock(&pipe_crc->lock);
DRM_ERROR("CRC buffer overflowing\n");
return;
crcs[0] = crc0;
crcs[1] = crc1;
crcs[2] = crc2;
crcs[3] = crc3;
crcs[4] = crc4;
drm_crtc_add_crc_entry(&crtc->base, true,
drm_accurate_vblank_count(&crtc->base),
crcs);
}
entry = &pipe_crc->entries[head];
entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm,
pipe);
entry->crc[0] = crc0;
entry->crc[1] = crc1;
entry->crc[2] = crc2;
entry->crc[3] = crc3;
entry->crc[4] = crc4;
head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
pipe_crc->head = head;
spin_unlock(&pipe_crc->lock);
wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
@@ -2703,12 +2736,13 @@ static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
* i915_handle_error - handle a gpu error
* @dev_priv: i915 device private
* @engine_mask: mask representing engines that are hung
* @fmt: Error message format string
*
* Do some basic checking of register state at error time and
* dump it to the syslog. Also call i915_capture_error_state() to make
* sure we get a record and make it available in debugfs. Fire a uevent
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
* @fmt: Error message format string
*/
void i915_handle_error(struct drm_i915_private *dev_priv,
u32 engine_mask,

@@ -3597,9 +3597,12 @@ enum {
#define EDP_PSR_PERF_CNT_MASK 0xffffff
#define EDP_PSR_DEBUG_CTL _MMIO(dev_priv->psr_mmio_base + 0x60)
#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1<<28)
#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1<<16)
#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1<<15)
#define EDP_PSR2_CTL _MMIO(0x6f900)
#define EDP_PSR2_ENABLE (1<<31)
@@ -3614,6 +3617,11 @@ enum {
#define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf<<4)
#define EDP_PSR2_IDLE_MASK 0xf
#define EDP_FRAMES_BEFORE_SU_ENTRY (1<<4)
#define EDP_PSR2_STATUS_CTL _MMIO(0x6f940)
#define EDP_PSR2_STATUS_STATE_MASK (0xf<<28)
#define EDP_PSR2_STATUS_STATE_SHIFT 28
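/* Illustrative helper (name invented): the current PSR2 hardware state is
 * the top nibble of EDP_PSR2_STATUS_CTL, extracted with the mask/shift
 * pair above.
 */
static u32 example_psr2_state(struct drm_i915_private *dev_priv)
{
        return (I915_READ(EDP_PSR2_STATUS_CTL) & EDP_PSR2_STATUS_STATE_MASK)
                >> EDP_PSR2_STATUS_STATE_SHIFT;
}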
/* VGA port control */
#define ADPA _MMIO(0x61100)
@@ -6446,6 +6454,12 @@ enum {
#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0)
#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
#define CHICKEN_TRANS_A 0x420c0
#define CHICKEN_TRANS_B 0x420c4
#define CHICKEN_TRANS(trans) _MMIO_TRANS(trans, CHICKEN_TRANS_A, CHICKEN_TRANS_B)
#define PSR2_VSC_ENABLE_PROG_HEADER (1<<12)
#define PSR2_ADD_VERTICAL_LINE_COUNT (1<<15)
#define DISP_ARB_CTL _MMIO(0x45000)
#define DISP_FBC_MEMORY_WAKE (1<<31)
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)

@@ -63,6 +63,7 @@ static inline void debug_fence_destroy(struct i915_sw_fence *fence)
static inline void debug_fence_free(struct i915_sw_fence *fence)
{
debug_object_free(fence, &i915_sw_fence_debug_descr);
smp_wmb(); /* flush the change in state before reallocation */
}
static inline void debug_fence_assert(struct i915_sw_fence *fence)

@@ -58,7 +58,7 @@ static u32 calc_residency(struct drm_i915_private *dev_priv,
if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
units <<= 8;
} else if (IS_BROXTON(dev_priv)) {
} else if (IS_GEN9_LP(dev_priv)) {
units = 1;
div = 1200; /* 833.33ns */
}

@@ -450,9 +450,9 @@ TRACE_EVENT(i915_gem_evict_vm,
TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
);
TRACE_EVENT(i915_gem_evict_vma,
TP_PROTO(struct i915_vma *vma, unsigned int flags),
TP_ARGS(vma, flags),
TRACE_EVENT(i915_gem_evict_node,
TP_PROTO(struct i915_address_space *vm, struct drm_mm_node *node, unsigned int flags),
TP_ARGS(vm, node, flags),
TP_STRUCT__entry(
__field(u32, dev)
@@ -464,11 +464,11 @@ TRACE_EVENT(i915_gem_evict_vma,
),
TP_fast_assign(
__entry->dev = vma->vm->i915->drm.primary->index;
__entry->vm = vma->vm;
__entry->start = vma->node.start;
__entry->size = vma->node.size;
__entry->color = vma->node.color;
__entry->dev = vm->i915->drm.primary->index;
__entry->vm = vm;
__entry->start = node->start;
__entry->size = node->size;
__entry->color = node->color;
__entry->flags = flags;
),

@@ -116,22 +116,20 @@ void intel_vgt_deballoon(struct drm_i915_private *dev_priv)
memset(&bl_info, 0, sizeof(bl_info));
}
static int vgt_balloon_space(struct drm_mm *mm,
static int vgt_balloon_space(struct i915_ggtt *ggtt,
struct drm_mm_node *node,
unsigned long start, unsigned long end)
{
unsigned long size = end - start;
if (start == end)
if (start >= end)
return -EINVAL;
DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
start, end, size / 1024);
node->start = start;
node->size = size;
return drm_mm_reserve_node(mm, node);
return i915_gem_gtt_reserve(&ggtt->base, node,
size, start, I915_COLOR_UNEVICTABLE,
0);
}
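
As the hunks below show, vgt_balloon_space() is called once per hole between the vGPU's assigned ranges and the rest of the GGTT. A self-contained sketch of how the four balloon regions fall out of the mappable/unmappable split, using made-up layout numbers (the variable names are simplified stand-ins, not the driver's structures):

#include <stdio.h>

int main(void)
{
        /* made-up example layout, in bytes */
        unsigned long long ggtt_start = 0;
        unsigned long long ggtt_end = 1ull << 32;
        unsigned long long host_mappable_end = 256ull << 20; /* host aperture */
        unsigned long long mappable_base = 64ull << 20;   /* vGPU low range */
        unsigned long long mappable_end = 128ull << 20;
        unsigned long long unmappable_base = 512ull << 20; /* vGPU high range */
        unsigned long long unmappable_end = 768ull << 20;
        unsigned long long guard = 4096; /* guard page kept free at the top */

        /* the four holes that get ballooned away from the guest */
        printf("space[0]: [%#llx, %#llx)\n", ggtt_start, mappable_base);
        printf("space[1]: [%#llx, %#llx)\n", mappable_end, host_mappable_end);
        printf("space[2]: [%#llx, %#llx)\n", host_mappable_end, unmappable_base);
        printf("space[3]: [%#llx, %#llx)\n", unmappable_end, ggtt_end - guard);
        return 0;
}
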
/**
@@ -214,10 +212,8 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
/* Unmappable graphic memory ballooning */
if (unmappable_base > ggtt->mappable_end) {
ret = vgt_balloon_space(&ggtt->base.mm,
&bl_info.space[2],
ggtt->mappable_end,
unmappable_base);
ret = vgt_balloon_space(ggtt, &bl_info.space[2],
ggtt->mappable_end, unmappable_base);
if (ret)
goto err;
@@ -228,18 +224,15 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
* because it is reserved to the guard page.
*/
if (unmappable_end < ggtt_end - PAGE_SIZE) {
ret = vgt_balloon_space(&ggtt->base.mm,
&bl_info.space[3],
unmappable_end,
ggtt_end - PAGE_SIZE);
ret = vgt_balloon_space(ggtt, &bl_info.space[3],
unmappable_end, ggtt_end - PAGE_SIZE);
if (ret)
goto err;
}
/* Mappable graphic memory ballooning */
if (mappable_base > ggtt->base.start) {
ret = vgt_balloon_space(&ggtt->base.mm,
&bl_info.space[0],
ret = vgt_balloon_space(ggtt, &bl_info.space[0],
ggtt->base.start, mappable_base);
if (ret)
@@ -247,10 +240,8 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
}
if (mappable_end < ggtt->mappable_end) {
ret = vgt_balloon_space(&ggtt->base.mm,
&bl_info.space[1],
mappable_end,
ggtt->mappable_end);
ret = vgt_balloon_space(ggtt, &bl_info.space[1],
mappable_end, ggtt->mappable_end);
if (ret)
goto err;

@@ -45,6 +45,7 @@ i915_vma_retire(struct i915_gem_active *active,
if (i915_vma_is_active(vma))
return;
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
WARN_ON(i915_vma_unbind(vma));
@@ -69,17 +70,15 @@ i915_vma_retire(struct i915_gem_active *active,
}
static struct i915_vma *
__i915_vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
struct i915_vma *vma;
struct rb_node *rb, **p;
int i;
GEM_BUG_ON(vm->closed);
vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
if (vma == NULL)
return ERR_PTR(-ENOMEM);
@@ -87,29 +86,50 @@ __i915_vma_create(struct drm_i915_gem_object *obj,
for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
init_request_active(&vma->last_read[i], i915_vma_retire);
init_request_active(&vma->last_fence, NULL);
list_add(&vma->vm_link, &vm->unbound_list);
vma->vm = vm;
vma->obj = obj;
vma->size = obj->base.size;
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
if (view) {
vma->ggtt_view = *view;
if (view->type == I915_GGTT_VIEW_PARTIAL) {
GEM_BUG_ON(range_overflows_t(u64,
view->params.partial.offset,
view->params.partial.size,
view->partial.offset,
view->partial.size,
obj->base.size >> PAGE_SHIFT));
vma->size = view->params.partial.size;
vma->size = view->partial.size;
vma->size <<= PAGE_SHIFT;
GEM_BUG_ON(vma->size >= obj->base.size);
} else if (view->type == I915_GGTT_VIEW_ROTATED) {
vma->size =
intel_rotation_info_size(&view->params.rotated);
vma->size = intel_rotation_info_size(&view->rotated);
vma->size <<= PAGE_SHIFT;
}
}
if (unlikely(vma->size > vm->total))
goto err_vma;
GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
if (i915_is_ggtt(vm)) {
if (unlikely(overflows_type(vma->size, u32)))
goto err_vma;
vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
i915_gem_object_get_tiling(obj),
i915_gem_object_get_stride(obj));
if (unlikely(vma->fence_size < vma->size || /* overflow */
vma->fence_size > vm->total))
goto err_vma;
GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
i915_gem_object_get_tiling(obj),
i915_gem_object_get_stride(obj));
GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
vma->flags |= I915_VMA_GGTT;
list_add(&vma->obj_link, &obj->vma_list);
} else {
@@ -131,20 +151,74 @@ __i915_vma_create(struct drm_i915_gem_object *obj,
}
rb_link_node(&vma->obj_node, rb, p);
rb_insert_color(&vma->obj_node, &obj->vma_tree);
list_add(&vma->vm_link, &vm->unbound_list);
return vma;
err_vma:
kmem_cache_free(vm->i915->vmas, vma);
return ERR_PTR(-E2BIG);
}
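
The fence_size check above uses the classic unsigned-overflow idiom: after rounding up, a result smaller than the input means the arithmetic wrapped. A standalone sketch of that check with made-up values (align_up matches the kernel's ALIGN() for power-of-two alignments):

#include <stdint.h>
#include <stdio.h>

/* round up to a power-of-two alignment, as the kernel's ALIGN() does */
static uint32_t align_up(uint32_t size, uint32_t align)
{
        return (size + align - 1) & ~(align - 1);
}

int main(void)
{
        uint32_t size = 0xfffff800, align = 4096; /* made-up values */
        uint32_t fence_size = align_up(size, align);

        if (fence_size < size) /* wrapped past 2^32: reject, as vma_create does */
                printf("overflow: %#x rounds past 32 bits\n", size);
        else
                printf("fence_size = %#x\n", fence_size);
        return 0;
}
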
struct i915_vma *
i915_vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
struct rb_node *rb;
rb = obj->vma_tree.rb_node;
while (rb) {
struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
long cmp;
cmp = i915_vma_compare(vma, vm, view);
if (cmp == 0)
return vma;
if (cmp < 0)
rb = rb->rb_right;
else
rb = rb->rb_left;
}
return NULL;
}
/**
* i915_vma_instance - return the singleton instance of the VMA
* @obj: parent &struct drm_i915_gem_object to be mapped
* @vm: address space in which the mapping is located
* @view: additional mapping requirements
*
* i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
* the same @view characteristics. If a match is not found, one is created.
* Once created, the VMA is kept until either the object is freed, or the
* address space is closed.
*
* Must be called with struct_mutex held.
*
* Returns the vma, or an error pointer.
*/
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
struct i915_vma *vma;
lockdep_assert_held(&obj->base.dev->struct_mutex);
GEM_BUG_ON(view && !i915_is_ggtt(vm));
GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view));
GEM_BUG_ON(vm->closed);
return __i915_vma_create(obj, vm, view);
vma = vma_lookup(obj, vm, view);
if (!vma)
vma = vma_create(obj, vm, view);
GEM_BUG_ON(!IS_ERR(vma) && i915_vma_is_closed(vma));
GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
return vma;
}
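
The new entry point replaces the old create-only API with a lookup-or-create contract: at most one VMA per (obj, vm, view) triple, kept alive until the object or address space goes away. A minimal userspace sketch of that contract, with a linked list standing in for the driver's rb-tree and an int key standing in for the (vm, view) pair:

#include <stdio.h>
#include <stdlib.h>

struct vma {
        int key; /* stand-in for the (vm, view) lookup key */
        struct vma *next;
};

static struct vma *instances;

/* lookup-or-create, mirroring i915_vma_instance()'s contract */
static struct vma *vma_instance(int key)
{
        struct vma *v;

        for (v = instances; v; v = v->next)
                if (v->key == key)
                        return v; /* the singleton already exists */

        v = calloc(1, sizeof(*v));
        if (!v)
                return NULL;
        v->key = key;
        v->next = instances;
        instances = v;
        return v; /* instances live until explicitly torn down */
}

int main(void)
{
        struct vma *a = vma_instance(42);
        struct vma *b = vma_instance(42);

        printf("same instance: %s\n", a == b ? "yes" : "no"); /* yes */
        return 0;
}
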
/**
@@ -195,6 +269,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
return ret;
}
trace_i915_vma_bind(vma, bind_flags);
ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
if (ret)
return ret;
@@ -258,7 +333,8 @@ i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
if (vma->node.size < size)
return true;
if (alignment && vma->node.start & (alignment - 1))
GEM_BUG_ON(alignment && !is_power_of_2(alignment));
if (alignment && !IS_ALIGNED(vma->node.start, alignment))
return true;
if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
@@ -277,31 +353,24 @@ i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
bool mappable, fenceable;
u32 fence_size, fence_alignment;
fence_size = i915_gem_get_ggtt_size(dev_priv,
vma->size,
i915_gem_object_get_tiling(obj));
fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
vma->size,
i915_gem_object_get_tiling(obj),
true);
fenceable = (vma->node.size == fence_size &&
(vma->node.start & (fence_alignment - 1)) == 0);
mappable = (vma->node.start + fence_size <=
dev_priv->ggtt.mappable_end);
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
GEM_BUG_ON(!vma->fence_size);
/*
* Explicitly disable for rotated VMA since the display does not
* need the fence and the VMA is not accessible to other users.
*/
if (mappable && fenceable &&
vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED)
if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
return;
fenceable = (vma->node.size >= vma->fence_size &&
IS_ALIGNED(vma->node.start, vma->fence_alignment));
mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
if (mappable && fenceable)
vma->flags |= I915_VMA_CAN_FENCE;
else
vma->flags &= ~I915_VMA_CAN_FENCE;
@@ -368,22 +437,26 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
size = max(size, vma->size);
if (flags & PIN_MAPPABLE)
size = i915_gem_get_ggtt_size(dev_priv, size,
i915_gem_object_get_tiling(obj));
alignment = max(alignment, vma->display_alignment);
if (flags & PIN_MAPPABLE) {
size = max_t(typeof(size), size, vma->fence_size);
alignment = max_t(typeof(alignment),
alignment, vma->fence_alignment);
}
alignment = max(max(alignment, vma->display_alignment),
i915_gem_get_ggtt_alignment(dev_priv, size,
i915_gem_object_get_tiling(obj),
flags & PIN_MAPPABLE));
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
GEM_BUG_ON(!is_power_of_2(alignment));
start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
end = vma->vm->total;
if (flags & PIN_MAPPABLE)
end = min_t(u64, end, dev_priv->ggtt.mappable_end);
if (flags & PIN_ZONE_4G)
end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
/* If binding the object/GGTT view requires more space than the entire
* aperture has, reject it early before evicting everything in a vain
@@ -403,65 +476,28 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
if (flags & PIN_OFFSET_FIXED) {
u64 offset = flags & PIN_OFFSET_MASK;
if (offset & (alignment - 1) ||
if (!IS_ALIGNED(offset, alignment) ||
range_overflows(offset, size, end)) {
ret = -EINVAL;
goto err_unpin;
}
vma->node.start = offset;
vma->node.size = size;
vma->node.color = obj->cache_level;
ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
if (ret) {
ret = i915_gem_evict_for_vma(vma, flags);
if (ret == 0)
ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
if (ret)
goto err_unpin;
}
} else {
u32 search_flag, alloc_flag;
if (flags & PIN_HIGH) {
search_flag = DRM_MM_SEARCH_BELOW;
alloc_flag = DRM_MM_CREATE_TOP;
} else {
search_flag = DRM_MM_SEARCH_DEFAULT;
alloc_flag = DRM_MM_CREATE_DEFAULT;
}
/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
* so we know that we always have a minimum alignment of 4096.
* The drm_mm range manager is optimised to return results
* with zero alignment, so where possible use the optimal
* path.
*/
if (alignment <= 4096)
alignment = 0;
search_free:
ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
&vma->node,
size, alignment,
obj->cache_level,
start, end,
search_flag,
alloc_flag);
if (ret) {
ret = i915_gem_evict_something(vma->vm, size, alignment,
obj->cache_level,
start, end,
flags);
if (ret == 0)
goto search_free;
ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
size, offset, obj->cache_level,
flags);
if (ret)
goto err_unpin;
} else {
ret = i915_gem_gtt_insert(vma->vm, &vma->node,
size, alignment, obj->cache_level,
start, end, flags);
if (ret)
goto err_unpin;
}
GEM_BUG_ON(vma->node.start < start);
GEM_BUG_ON(vma->node.start + vma->node.size > end);
}
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
@@ -504,6 +540,7 @@ int __i915_vma_do_pin(struct i915_vma *vma,
if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
__i915_vma_set_map_and_fenceable(vma);
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
return 0;

@@ -55,6 +55,9 @@ struct i915_vma {
u64 size;
u64 display_alignment;
u32 fence_size;
u32 fence_alignment;
unsigned int flags;
/**
* How many users have pinned this object in GTT space. The following
@@ -109,9 +112,9 @@ struct i915_vma {
};
struct i915_vma *
i915_vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view);
i915_vma_instance(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view);
void i915_vma_unpin_and_release(struct i915_vma **p_vma);
@@ -196,15 +199,30 @@ i915_vma_compare(struct i915_vma *vma,
if (cmp)
return cmp;
BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL != 0);
cmp = vma->ggtt_view.type;
if (!view)
return vma->ggtt_view.type;
return cmp;
if (vma->ggtt_view.type != view->type)
return vma->ggtt_view.type - view->type;
cmp -= view->type;
if (cmp)
return cmp;
return memcmp(&vma->ggtt_view.params,
&view->params,
sizeof(view->params));
/* ggtt_view.type also encodes its size so that we both distinguish
* different views using it as a "type" and also use a compact (no
* accessing of uninitialised padding bytes) memcmp without storing
* an extra parameter or adding more code.
*
* To ensure that the memcmp is valid for all branches of the union,
* even though the code looks like it is just comparing one branch,
* we assert above that all branches have the same address, and that
* each branch has a unique type/size.
*/
BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL >= I915_GGTT_VIEW_PARTIAL);
BUILD_BUG_ON(I915_GGTT_VIEW_PARTIAL >= I915_GGTT_VIEW_ROTATED);
BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
offsetof(typeof(*view), partial));
return memcmp(&vma->ggtt_view.partial, &view->partial, view->type);
}
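
The trick documented above is that the view type enum doubles as the number of bytes memcmp() should look at. A standalone sketch of the pattern, assuming (as the BUILD_BUG_ONs enforce) that every non-normal type is defined as the sizeof of its union branch:

#include <stdio.h>
#include <string.h>

struct partial { unsigned long offset, size; };
struct rotated { unsigned int plane[2]; };

/* each non-normal type equals the size of its branch, so the type value
 * itself tells memcmp() how many bytes are meaningful */
enum view_type {
        VIEW_NORMAL  = 0,
        VIEW_ROTATED = sizeof(struct rotated),
        VIEW_PARTIAL = sizeof(struct partial),
};

struct view {
        enum view_type type;
        union {
                struct partial partial;
                struct rotated rotated;
        };
};

static int view_compare(const struct view *a, const struct view *b)
{
        if (a->type != b->type)
                return a->type - b->type;
        /* compare only the bytes the type says are initialised */
        return memcmp(&a->partial, &b->partial, a->type);
}

int main(void)
{
        struct view a = { .type = VIEW_PARTIAL, .partial = { 0, 4096 } };
        struct view b = { .type = VIEW_PARTIAL, .partial = { 0, 4096 } };

        printf("equal: %s\n", view_compare(&a, &b) ? "no" : "yes");
        return 0;
}
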
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
@@ -229,8 +247,11 @@ i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
/* Pin early to prevent the shrinker/eviction logic from destroying
* our vma as we insert and bind.
*/
if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0))
if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0)) {
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
return 0;
}
return __i915_vma_do_pin(vma, size, alignment, flags);
}
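
The fast path above packs a pin count and the bind-mode bits into one flags word: the increment takes a pin reference, and the XOR-then-mask test checks that the existing bind modes already cover what the caller asked for, with no call into the slow path. A reduced sketch of the bit trick (the layout below is an assumption for illustration, not the driver's exact one):

#include <stdio.h>

#define PIN_MASK    0xffu          /* assumed: low byte counts pins */
#define GLOBAL_BIND (1u << 8)
#define LOCAL_BIND  (1u << 9)
#define BIND_MASK   (GLOBAL_BIND | LOCAL_BIND)

int main(void)
{
        unsigned int flags = GLOBAL_BIND; /* bound globally, zero pins */
        unsigned int want = GLOBAL_BIND;  /* caller wants a global pin */

        /* take a pin reference and test the bind bits in one expression */
        if (((++flags ^ want) & BIND_MASK) == 0)
                printf("fast path: already bound, pins=%u\n",
                       flags & PIN_MASK);
        else
                printf("slow path: need to bind first\n");
        return 0;
}
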

@@ -85,6 +85,8 @@ intel_plane_duplicate_state(struct drm_plane *plane)
__drm_atomic_helper_plane_duplicate_state(plane, state);
intel_state->vma = NULL;
return state;
}
@@ -100,6 +102,24 @@ void
intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct i915_vma *vma;
vma = fetch_and_zero(&to_intel_plane_state(state)->vma);
/*
* FIXME: Normally intel_cleanup_plane_fb handles destruction of vma.
* We currently don't clear all planes during driver unload, so we have
* to be able to unpin vma here for now.
*
* Normally this can only happen during unload when kmscon is disabled
* and userspace doesn't attempt to set a framebuffer at all.
*/
if (vma) {
mutex_lock(&plane->dev->struct_mutex);
intel_unpin_fb_vma(vma);
mutex_unlock(&plane->dev->struct_mutex);
}
drm_atomic_helper_plane_destroy_state(plane, state);
}

@@ -34,6 +34,10 @@
* low-power state and comes back to normal.
*/
#define I915_CSR_GLK "i915/glk_dmc_ver1_01.bin"
MODULE_FIRMWARE(I915_CSR_GLK);
#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
MODULE_FIRMWARE(I915_CSR_KBL);
#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
@@ -286,7 +290,9 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
csr->version = css_header->version;
if (IS_KABYLAKE(dev_priv)) {
if (IS_GEMINILAKE(dev_priv)) {
required_version = GLK_CSR_VERSION_REQUIRED;
} else if (IS_KABYLAKE(dev_priv)) {
required_version = KBL_CSR_VERSION_REQUIRED;
} else if (IS_SKYLAKE(dev_priv)) {
required_version = SKL_CSR_VERSION_REQUIRED;
@@ -435,7 +441,9 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (!HAS_CSR(dev_priv))
return;
if (IS_KABYLAKE(dev_priv))
if (IS_GEMINILAKE(dev_priv))
csr->fw_path = I915_CSR_GLK;
else if (IS_KABYLAKE(dev_priv))
csr->fw_path = I915_CSR_KBL;
else if (IS_SKYLAKE(dev_priv))
csr->fw_path = I915_CSR_SKL;

@@ -192,7 +192,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
hweight8(sseu->slice_mask) > 1;
sseu->has_subslice_pg =
IS_BROXTON(dev_priv) && sseu_subslice_total(sseu) > 1;
IS_GEN9_LP(dev_priv) && sseu_subslice_total(sseu) > 1;
sseu->has_eu_pg = sseu->eu_per_subslice > 2;
if (IS_BROXTON(dev_priv)) {

@@ -2137,11 +2137,10 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
const struct drm_framebuffer *fb,
unsigned int rotation)
{
view->type = I915_GGTT_VIEW_NORMAL;
if (drm_rotation_90_or_270(rotation)) {
*view = i915_ggtt_view_rotated;
view->params.rotated = to_intel_framebuffer(fb)->rot_info;
} else {
*view = i915_ggtt_view_normal;
view->type = I915_GGTT_VIEW_ROTATED;
view->rotated = to_intel_framebuffer(fb)->rot_info;
}
}
@@ -2235,24 +2234,19 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
i915_vma_pin_fence(vma);
}
i915_vma_get(vma);
err:
intel_runtime_pm_put(dev_priv);
return vma;
}
void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
void intel_unpin_fb_vma(struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct i915_ggtt_view view;
struct i915_vma *vma;
WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
intel_fill_fb_ggtt_view(&view, fb, rotation);
vma = i915_gem_object_to_ggtt(obj, &view);
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
i915_vma_unpin_fence(vma);
i915_gem_object_unpin_from_display_plane(vma);
i915_vma_put(vma);
}
static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
@@ -2745,7 +2739,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *c;
struct intel_crtc *i;
struct drm_i915_gem_object *obj;
struct drm_plane *primary = intel_crtc->base.primary;
struct drm_plane_state *plane_state = primary->state;
@@ -2770,20 +2763,20 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
* an fb with another CRTC instead
*/
for_each_crtc(dev, c) {
i = to_intel_crtc(c);
struct intel_plane_state *state;
if (c == &intel_crtc->base)
continue;
if (!i->active)
if (!to_intel_crtc(c)->active)
continue;
fb = c->primary->fb;
if (!fb)
state = to_intel_plane_state(c->primary->state);
if (!state->vma)
continue;
obj = intel_fb_obj(fb);
if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) {
if (intel_plane_ggtt_offset(state) == plane_config->base) {
fb = c->primary->fb;
drm_framebuffer_reference(fb);
goto valid_fb;
}
@@ -2796,7 +2789,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
* simplest solution is to just disable the primary plane now and
* pretend the BIOS never had it enabled.
*/
to_intel_plane_state(plane_state)->base.visible = false;
plane_state->visible = false;
crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
intel_pre_disable_primary_noatomic(&intel_crtc->base);
intel_plane->disable_plane(primary, &intel_crtc->base);
@@ -2804,6 +2797,19 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
return;
valid_fb:
mutex_lock(&dev->struct_mutex);
intel_state->vma =
intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
mutex_unlock(&dev->struct_mutex);
if (IS_ERR(intel_state->vma)) {
DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
intel_crtc->pipe, PTR_ERR(intel_state->vma));
intel_state->vma = NULL;
drm_framebuffer_unreference(fb);
return;
}
plane_state->src_x = 0;
plane_state->src_y = 0;
plane_state->src_w = fb->width << 16;
@@ -3099,13 +3105,13 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_GEN(dev_priv) >= 4) {
I915_WRITE(DSPSURF(plane),
intel_fb_gtt_offset(fb, rotation) +
intel_plane_ggtt_offset(plane_state) +
intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPLINOFF(plane), linear_offset);
} else {
I915_WRITE(DSPADDR(plane),
intel_fb_gtt_offset(fb, rotation) +
intel_plane_ggtt_offset(plane_state) +
intel_crtc->dspaddr_offset);
}
POSTING_READ(reg);
@@ -3202,7 +3208,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_WRITE(DSPSURF(plane),
intel_fb_gtt_offset(fb, rotation) +
intel_plane_ggtt_offset(plane_state) +
intel_crtc->dspaddr_offset);
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
@@ -3225,23 +3231,6 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
}
}
u32 intel_fb_gtt_offset(struct drm_framebuffer *fb,
unsigned int rotation)
{
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct i915_ggtt_view view;
struct i915_vma *vma;
intel_fill_fb_ggtt_view(&view, fb, rotation);
vma = i915_gem_object_to_ggtt(obj, &view);
if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
view.type))
return -1;
return i915_ggtt_offset(vma);
}
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
struct drm_device *dev = intel_crtc->base.dev;
@@ -3437,7 +3426,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
}
I915_WRITE(PLANE_SURF(pipe, plane_id),
intel_fb_gtt_offset(fb, rotation) + surf_addr);
intel_plane_ggtt_offset(plane_state) + surf_addr);
POSTING_READ(PLANE_SURF(pipe, plane_id));
}
@@ -3561,23 +3550,19 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
state = drm_atomic_helper_duplicate_state(dev, ctx);
if (IS_ERR(state)) {
ret = PTR_ERR(state);
state = NULL;
DRM_ERROR("Duplicating state failed with %i\n", ret);
goto err;
return;
}
ret = drm_atomic_helper_disable_all(dev, ctx);
if (ret) {
DRM_ERROR("Suspending crtc's failed with %i\n", ret);
goto err;
drm_atomic_state_put(state);
return;
}
dev_priv->modeset_restore_state = state;
state->acquire_ctx = ctx;
return;
err:
drm_atomic_state_put(state);
}
void intel_finish_reset(struct drm_i915_private *dev_priv)
@@ -6886,13 +6871,13 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
if (!intel_crtc->active)
return;
if (to_intel_plane_state(crtc->primary->state)->base.visible) {
if (crtc->primary->state->visible) {
WARN_ON(intel_crtc->flip_work);
intel_pre_disable_primary_noatomic(crtc);
intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
to_intel_plane_state(crtc->primary->state)->base.visible = false;
crtc->primary->state->visible = false;
}
state = drm_atomic_state_alloc(crtc->dev);
@@ -8406,7 +8391,6 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
mode->type = DRM_MODE_TYPE_DRIVER;
mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
mode->flags |= pipe_config->base.adjusted_mode.flags;
mode->hsync = drm_mode_hsync(mode);
mode->vrefresh = drm_mode_vrefresh(mode);
@@ -11291,6 +11275,7 @@ found:
}
old->restore_state = restore_state;
drm_atomic_state_put(state);
/* let the connector get through one full cycle before testing */
intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
@@ -11570,7 +11555,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
flush_work(&work->mmio_work);
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
intel_unpin_fb_vma(work->old_vma);
i915_gem_object_put(work->pending_flip_obj);
mutex_unlock(&dev->struct_mutex);
@@ -12280,8 +12265,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
goto cleanup_pending;
}
work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation);
work->gtt_offset += intel_crtc->dspaddr_offset;
work->old_vma = to_intel_plane_state(primary->state)->vma;
to_intel_plane_state(primary->state)->vma = vma;
work->gtt_offset = i915_ggtt_offset(vma) + intel_crtc->dspaddr_offset;
work->rotation = crtc->primary->state->rotation;
/*
@@ -12336,7 +12323,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
cleanup_request:
i915_add_request_no_flush(request);
cleanup_unpin:
intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
to_intel_plane_state(primary->state)->vma = work->old_vma;
intel_unpin_fb_vma(vma);
cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count);
unlock:
@@ -12466,7 +12454,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
}
was_visible = old_plane_state->base.visible;
visible = to_intel_plane_state(plane_state)->base.visible;
visible = plane_state->visible;
if (!was_crtc_enabled && WARN_ON(was_visible))
was_visible = false;
@@ -12482,7 +12470,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
* only combine the results from all planes in the current place?
*/
if (!is_crtc_enabled)
to_intel_plane_state(plane_state)->base.visible = visible = false;
plane_state->visible = visible = false;
if (!was_visible && !visible)
return 0;
@@ -14740,6 +14728,7 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.page_flip = intel_crtc_page_flip,
.atomic_duplicate_state = intel_crtc_duplicate_state,
.atomic_destroy_state = intel_crtc_destroy_state,
.set_crc_source = intel_crtc_set_crc_source,
};
/**
@@ -14836,6 +14825,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
DRM_DEBUG_KMS("failed to pin object\n");
return PTR_ERR(vma);
}
to_intel_plane_state(new_state)->vma = vma;
}
return 0;
@@ -14854,19 +14845,12 @@ void
intel_cleanup_plane_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct intel_plane_state *old_intel_state;
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
struct i915_vma *vma;
old_intel_state = to_intel_plane_state(old_state);
if (!obj && !old_obj)
return;
if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
!INTEL_INFO(dev_priv)->cursor_needs_physical))
intel_unpin_fb_obj(old_state->fb, old_state->rotation);
/* Should only be called after a successful intel_prepare_plane_fb()! */
vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma);
if (vma)
intel_unpin_fb_vma(vma);
}
int
@@ -15018,6 +15002,7 @@ intel_legacy_cursor_update(struct drm_plane *plane,
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *old_fb;
struct drm_crtc_state *crtc_state = crtc->state;
struct i915_vma *old_vma;
/*
* When crtc is inactive or there is a modeset pending,
@@ -15089,9 +15074,12 @@ intel_legacy_cursor_update(struct drm_plane *plane,
ret = PTR_ERR(vma);
goto out_unlock;
}
to_intel_plane_state(new_plane_state)->vma = vma;
}
old_fb = old_plane_state->fb;
old_vma = to_intel_plane_state(old_plane_state)->vma;
i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
intel_plane->frontbuffer_bit);
@@ -15101,6 +15089,7 @@ intel_legacy_cursor_update(struct drm_plane *plane,
*to_intel_plane_state(old_plane_state) = *to_intel_plane_state(new_plane_state);
new_plane_state->fence = NULL;
new_plane_state->fb = old_fb;
to_intel_plane_state(new_plane_state)->vma = old_vma;
intel_plane->update_plane(plane,
to_intel_crtc_state(crtc->state),
@@ -15339,7 +15328,7 @@ intel_update_cursor_plane(struct drm_plane *plane,
if (!obj)
addr = 0;
else if (!INTEL_INFO(dev_priv)->cursor_needs_physical)
addr = i915_gem_object_ggtt_offset(obj, NULL);
addr = intel_plane_ggtt_offset(state);
else
addr = obj->phys_handle->busaddr;
@@ -16843,7 +16832,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
* Temporarily change the plane mapping and disable everything
* ... */
plane = crtc->plane;
to_intel_plane_state(crtc->base.primary->state)->base.visible = true;
crtc->base.primary->state->visible = true;
crtc->plane = !plane;
intel_crtc_disable_noatomic(&crtc->base);
crtc->plane = plane;
@@ -16995,7 +16984,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
dev_priv->active_crtcs = 0;
for_each_intel_crtc(dev, crtc) {
struct intel_crtc_state *crtc_state = crtc->config;
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
memset(crtc_state, 0, sizeof(*crtc_state));
@@ -17014,7 +17004,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
crtc->base.base.id, crtc->base.name,
enableddisabled(crtc->active));
enableddisabled(crtc_state->base.active));
}
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
@@ -17024,7 +17014,11 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
&pll->state.hw_state);
pll->state.crtc_mask = 0;
for_each_intel_crtc(dev, crtc) {
if (crtc->active && crtc->config->shared_dpll == pll)
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
if (crtc_state->base.active &&
crtc_state->shared_dpll == pll)
pll->state.crtc_mask |= 1 << crtc->pipe;
}
pll->active_mask = pll->state.crtc_mask;
@@ -17037,11 +17031,14 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
pipe = 0;
if (encoder->get_hw_state(encoder, &pipe)) {
struct intel_crtc_state *crtc_state;
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
crtc_state = to_intel_crtc_state(crtc->base.state);
encoder->base.crtc = &crtc->base;
crtc->config->output_types |= 1 << encoder->type;
encoder->get_config(encoder, crtc->config);
crtc_state->output_types |= 1 << encoder->type;
encoder->get_config(encoder, crtc_state);
} else {
encoder->base.crtc = NULL;
}
@@ -17082,14 +17079,16 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
}
for_each_intel_crtc(dev, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
int pixclk = 0;
crtc->base.hwmode = crtc->config->base.adjusted_mode;
crtc->base.hwmode = crtc_state->base.adjusted_mode;
memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
if (crtc->base.state->active) {
intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
if (crtc_state->base.active) {
intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
/*
@@ -17101,17 +17100,17 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
* set a flag to indicate that a full recalculation is
* needed on the next commit.
*/
crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;
if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
pixclk = ilk_pipe_pixel_rate(crtc->config);
pixclk = ilk_pipe_pixel_rate(crtc_state);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
pixclk = crtc->config->base.adjusted_mode.crtc_clock;
pixclk = crtc_state->base.adjusted_mode.crtc_clock;
else
WARN_ON(dev_priv->display.modeset_calc_cdclk);
/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
if (IS_BROADWELL(dev_priv) && crtc->config->ips_enabled)
if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
pixclk = DIV_ROUND_UP(pixclk * 100, 95);
drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
@@ -17120,7 +17119,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
dev_priv->min_pixclk[crtc->pipe] = pixclk;
intel_pipe_config_sanity_check(dev_priv, crtc->config);
intel_pipe_config_sanity_check(dev_priv, crtc_state);
}
}
@@ -17221,47 +17220,19 @@ void intel_display_resume(struct drm_device *dev)
if (ret)
DRM_ERROR("Restoring old state failed with %i\n", ret);
drm_atomic_state_put(state);
if (state)
drm_atomic_state_put(state);
}
void intel_modeset_gem_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *c;
struct drm_i915_gem_object *obj;
intel_init_gt_powersave(dev_priv);
intel_modeset_init_hw(dev);
intel_setup_overlay(dev_priv);
/*
* Make sure any fbs we allocated at startup are properly
* pinned & fenced. When we do the allocation it's too early
* for this.
*/
for_each_crtc(dev, c) {
struct i915_vma *vma;
obj = intel_fb_obj(c->primary->fb);
if (obj == NULL)
continue;
mutex_lock(&dev->struct_mutex);
vma = intel_pin_and_fence_fb_obj(c->primary->fb,
c->primary->state->rotation);
mutex_unlock(&dev->struct_mutex);
if (IS_ERR(vma)) {
DRM_ERROR("failed to pin boot fb on pipe %d\n",
to_intel_crtc(c)->pipe);
drm_framebuffer_unreference(c->primary->fb);
c->primary->fb = NULL;
c->primary->crtc = c->primary->state->crtc = NULL;
update_state_fb(c->primary);
c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
}
}
}
int intel_connector_register(struct drm_connector *connector)

@@ -3042,6 +3042,32 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp)
{
uint8_t psr_caps = 0;
drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps);
return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
}
static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
uint8_t dprx = 0;
drm_dp_dpcd_readb(&intel_dp->aux,
DP_DPRX_FEATURE_ENUMERATION_LIST,
&dprx);
return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}
static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
uint8_t alpm_caps = 0;
drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, &alpm_caps);
return alpm_caps & DP_ALPM_CAP;
}
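
All three helpers above share one shape: read a single DPCD byte, leave the local at 0 if the read fails, and test one capability bit. A reduced sketch of the pattern with a stubbed-out read (dpcd_readb is a stand-in for drm_dp_dpcd_readb, not the real helper):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ALPM_CAP (1 << 0) /* mirrors the DP_ALPM_CAP bit */

/* stand-in for drm_dp_dpcd_readb(): pretend the sink reports ALPM */
static int dpcd_readb(unsigned int address, uint8_t *value)
{
        (void)address;
        *value = ALPM_CAP;
        return 1; /* one byte read */
}

static bool get_alpm_status(void)
{
        uint8_t alpm_caps = 0; /* stays 0 if the read fails */

        dpcd_readb(0x02e /* DP_RECEIVER_ALPM_CAP */, &alpm_caps);
        return alpm_caps & ALPM_CAP;
}

int main(void)
{
        printf("ALPM supported: %s\n", get_alpm_status() ? "yes" : "no");
        return 0;
}
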
/* These are source-specific values. */
uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
@@ -3412,7 +3438,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
if (HAS_DDI(dev_priv)) {
signal_levels = ddi_signal_levels(intel_dp);
if (IS_BROXTON(dev_priv))
if (IS_GEN9_LP(dev_priv))
signal_levels = 0;
else
mask = DDI_BUF_EMP_MASK;
@@ -3620,6 +3646,16 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
DRM_DEBUG_KMS("PSR2 %s on sink",
dev_priv->psr.psr2_support ? "supported" : "not supported");
if (dev_priv->psr.psr2_support) {
dev_priv->psr.y_cord_support =
intel_dp_get_y_cord_status(intel_dp);
dev_priv->psr.colorimetry_support =
intel_dp_get_colorimetry_status(intel_dp);
dev_priv->psr.alpm =
intel_dp_get_alpm_status(intel_dp);
}
}
/* Read the eDP Display control capabilities registers */

@@ -376,6 +376,7 @@ struct intel_atomic_state {
struct intel_plane_state {
struct drm_plane_state base;
struct drm_rect clip;
struct i915_vma *vma;
struct {
u32 offset;
@@ -1067,6 +1068,7 @@ struct intel_flip_work {
struct work_struct mmio_work;
struct drm_crtc *crtc;
struct i915_vma *old_vma;
struct drm_framebuffer *old_fb;
struct drm_i915_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
@@ -1302,7 +1304,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx);
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
void intel_unpin_fb_vma(struct i915_vma *vma);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1391,7 +1393,10 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, unsigned int rotation);
static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
{
return i915_ggtt_offset(state->vma);
}
u32 skl_plane_ctl_format(uint32_t pixel_format);
u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
@@ -1880,5 +1885,11 @@ void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
/* intel_pipe_crc.c */
int intel_pipe_crc_create(struct drm_minor *minor);
void intel_pipe_crc_cleanup(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
size_t *values_cnt);
#else
#define intel_crtc_set_crc_source NULL
#endif
extern const struct file_operations i915_display_crc_ctl_fops;
#endif /* __INTEL_DRV_H__ */

@@ -264,7 +264,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
return PTR_ERR(obj);
}
vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_unref;

@@ -173,7 +173,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
if (IS_I945GM(dev_priv))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= params->fb.fence_reg;
fbc_ctl |= params->vma->fence->id;
I915_WRITE(FBC_CONTROL, fbc_ctl);
}
@@ -193,8 +193,8 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
else
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;
if (params->vma->fence) {
dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
} else {
I915_WRITE(DPFC_FENCE_YOFF, 0);
@@ -251,13 +251,14 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
break;
}
if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
if (params->vma->fence) {
dpfc_ctl |= DPFC_CTL_FENCE_EN;
if (IS_GEN5(dev_priv))
dpfc_ctl |= params->fb.fence_reg;
dpfc_ctl |= params->vma->fence->id;
if (IS_GEN6(dev_priv)) {
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
SNB_CPU_FENCE_ENABLE |
params->vma->fence->id);
I915_WRITE(DPFC_CPU_FENCE_OFFSET,
params->crtc.fence_y_offset);
}
@@ -269,7 +270,8 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
}
I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
I915_WRITE(ILK_FBC_RT_BASE,
i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -319,10 +321,11 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
break;
}
if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
if (params->vma->fence) {
dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
SNB_CPU_FENCE_ENABLE |
params->vma->fence->id);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
} else {
I915_WRITE(SNB_DPFC_CTL_SA, 0);
@@ -727,14 +730,6 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
return effective_w <= max_w && effective_h <= max_h;
}
/* XXX replace me when we have VMA tracking for intel_plane_state */
static int get_fence_id(struct drm_framebuffer *fb)
{
struct i915_vma *vma = i915_gem_object_to_ggtt(intel_fb_obj(fb), NULL);
return vma && vma->fence ? vma->fence->id : I915_FENCE_REG_NONE;
}
static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
@@ -743,7 +738,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj;
cache->vma = NULL;
cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -758,16 +754,10 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
if (!cache->plane.visible)
return;
obj = intel_fb_obj(fb);
/* FIXME: We lack the proper locking here, so only run this on the
* platforms that need it. */
if (IS_GEN(dev_priv, 5, 6))
cache->fb.ilk_ggtt_offset = i915_gem_object_ggtt_offset(obj, NULL);
cache->fb.format = fb->format;
cache->fb.stride = fb->pitches[0];
cache->fb.fence_reg = get_fence_id(fb);
cache->fb.tiling_mode = i915_gem_object_get_tiling(obj);
cache->vma = plane_state->vma;
}
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
@@ -784,7 +774,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
if (!cache->plane.visible) {
if (!cache->vma) {
fbc->no_fbc_reason = "primary plane not visible";
return false;
}
@@ -807,8 +797,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
* so have no fence associated with it) due to aperture constraints
* at the time of pinning.
*/
if (cache->fb.tiling_mode != I915_TILING_X ||
cache->fb.fence_reg == I915_FENCE_REG_NONE) {
if (!cache->vma->fence) {
fbc->no_fbc_reason = "framebuffer not tiled or fenced";
return false;
}
@@ -888,17 +877,16 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
* zero. */
memset(params, 0, sizeof(*params));
params->vma = cache->vma;
params->crtc.pipe = crtc->pipe;
params->crtc.plane = crtc->plane;
params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);
params->fb.format = cache->fb.format;
params->fb.stride = cache->fb.stride;
params->fb.fence_reg = cache->fb.fence_reg;
params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset;
}
static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
@@ -1296,7 +1284,7 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
for_each_intel_crtc(&dev_priv->drm, crtc)
if (intel_crtc_active(crtc) &&
to_intel_plane_state(crtc->base.primary->state)->base.visible)
crtc->base.primary->state->visible)
dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
}

@@ -284,7 +284,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
out_destroy_fbi:
drm_fb_helper_release_fbi(helper);
out_unpin:
intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
intel_unpin_fb_vma(vma);
out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -549,7 +549,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
if (ifbdev->fb) {
mutex_lock(&ifbdev->helper.dev->struct_mutex);
intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
intel_unpin_fb_vma(ifbdev->vma);
mutex_unlock(&ifbdev->helper.dev->struct_mutex);
drm_framebuffer_remove(&ifbdev->fb->base);
@@ -742,6 +742,9 @@ void intel_fbdev_initial_config_async(struct drm_device *dev)
{
struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
if (!ifbdev)
return;
ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
}

@@ -145,7 +145,7 @@
* The GuC firmware layout looks like this:
*
* +-------------------------------+
* | guc_css_header |
* | uc_css_header |
* | |
* | contains major/minor version |
* +-------------------------------+
@@ -172,9 +172,16 @@
* 3. Length info for each component can be found in the header, in dwords.
* 4. The modulus and exponent key are not required by the driver. They may
* not appear in the firmware, in which case the driver loads a truncated
* firmware.
*
* The HuC firmware layout is the same as the GuC firmware layout.
*
* The HuC firmware css header differs, but only in where the version
* information is saved. The uc_css_header is unified to support both;
* the driver should read the HuC version from uc_css_header.huc_sw_version
* and the GuC version from uc_css_header.guc_sw_version.
*/
struct guc_css_header {
struct uc_css_header {
uint32_t module_type;
/* header_size includes all non-uCode bits, including css_header, rsa
* key, modulus key and exponent data. */
@@ -205,8 +212,16 @@ struct guc_css_header {
char username[8];
char buildnumber[12];
uint32_t device_id;
uint32_t guc_sw_version;
union {
struct {
uint32_t branch_client_version;
uint32_t sw_version;
} guc;
struct {
uint32_t sw_version;
uint32_t reserved;
} huc;
};
uint32_t prod_preprod_fw;
uint32_t reserved[12];
uint32_t header_info;
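
Both branches of the new union pack the firmware version the same way: major in the high 16 bits, minor in the low 16, which is how the fetch code below fills major_ver_found/minor_ver_found. A tiny sketch of the unpacking (the example value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t sw_version = (5u << 16) | 9u; /* made-up example: v5.9 */

        uint16_t major = sw_version >> 16;
        uint16_t minor = sw_version & 0xffff;

        printf("uC firmware version %u.%u\n", major, minor); /* 5.9 */
        return 0;
}
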
@@ -490,6 +505,7 @@ enum intel_guc_action {
INTEL_GUC_ACTION_ENTER_S_STATE = 0x501,
INTEL_GUC_ACTION_EXIT_S_STATE = 0x502,
INTEL_GUC_ACTION_SLPC_REQUEST = 0x3003,
INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000,
INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000,
INTEL_GUC_ACTION_LIMIT
};

@@ -51,12 +51,6 @@
* 512K. In order to exclude the 0-512K address space from GGTT, all gfx objects
* used by the GuC are pinned with PIN_OFFSET_BIAS along with the size of WOPCM.
*
* Firmware log:
* Firmware log is enabled by setting i915.guc_log_level to non-negative level.
* Log data is printed out via reading debugfs i915_guc_log_dump. Reading from
* i915_guc_load_status will print out firmware loading status and scratch
* registers value.
*
*/
#define SKL_FW_MAJOR 6
@@ -81,16 +75,16 @@ MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
/* User-friendly representation of an enum */
const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
{
switch (status) {
case GUC_FIRMWARE_FAIL:
case INTEL_UC_FIRMWARE_FAIL:
return "FAIL";
case GUC_FIRMWARE_NONE:
case INTEL_UC_FIRMWARE_NONE:
return "NONE";
case GUC_FIRMWARE_PENDING:
case INTEL_UC_FIRMWARE_PENDING:
return "PENDING";
case GUC_FIRMWARE_SUCCESS:
case INTEL_UC_FIRMWARE_SUCCESS:
return "SUCCESS";
default:
return "UNKNOWN!";
@@ -278,7 +272,7 @@ static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
struct i915_vma *vma)
{
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
unsigned long offset;
struct sg_table *sg = vma->pages;
u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT];
@@ -334,12 +328,12 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
return ret;
}
static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv)
{
u32 wopcm_size = GUC_WOPCM_TOP;
/* On BXT, the top of WOPCM is reserved for RC6 context */
if (IS_BROXTON(dev_priv))
if (IS_GEN9_LP(dev_priv))
wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;
return wopcm_size;
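
The loader below rejects any GuC blob whose header plus uCode does not fit in the region this function returns. A hedged sketch of that fit check with placeholder sizes (WOPCM_TOP and RC6_RESERVED here are made-up constants, not the driver's register values):

#include <stdio.h>

#define WOPCM_TOP    (512u * 1024) /* assumed total WOPCM size */
#define RC6_RESERVED (32u * 1024)  /* assumed carve-out on some parts */

int main(void)
{
        unsigned int wopcm = WOPCM_TOP - RC6_RESERVED;
        unsigned int header_size = 128, ucode_size = 400u * 1024;
        unsigned int size = header_size + ucode_size;

        if (size > wopcm)
                printf("firmware too large for WOPCM (%u > %u)\n", size, wopcm);
        else
                printf("firmware fits: %u of %u bytes\n", size, wopcm);
        return 0;
}
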
@@ -350,29 +344,27 @@ static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
*/
static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
{
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
struct i915_vma *vma;
int ret;
ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
ret = i915_gem_object_set_to_gtt_domain(guc_fw->obj, false);
if (ret) {
DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
return ret;
}
vma = i915_gem_object_ggtt_pin(guc_fw->guc_fw_obj, NULL, 0, 0, 0);
vma = i915_gem_object_ggtt_pin(guc_fw->obj, NULL, 0, 0,
PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
if (IS_ERR(vma)) {
DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma));
return PTR_ERR(vma);
}
/* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* init WOPCM */
I915_WRITE(GUC_WOPCM_SIZE, guc_wopcm_size(dev_priv));
I915_WRITE(GUC_WOPCM_SIZE, intel_guc_wopcm_size(dev_priv));
I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE);
/* Enable MIA caching. GuC clock gating is disabled. */
@@ -388,7 +380,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
if (IS_BROXTON(dev_priv))
if (IS_GEN9_LP(dev_priv))
I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
else
I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
@@ -450,14 +442,14 @@ static int guc_hw_reset(struct drm_i915_private *dev_priv)
*/
int intel_guc_setup(struct drm_i915_private *dev_priv)
{
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
const char *fw_path = guc_fw->guc_fw_path;
struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
const char *fw_path = guc_fw->path;
int retries, ret, err;
DRM_DEBUG_DRIVER("GuC fw status: path %s, fetch %s, load %s\n",
fw_path,
intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
intel_uc_fw_status_repr(guc_fw->fetch_status),
intel_uc_fw_status_repr(guc_fw->load_status));
/* Loading forbidden, or no firmware to load? */
if (!i915.enable_guc_loading) {
@@ -475,10 +467,10 @@ int intel_guc_setup(struct drm_i915_private *dev_priv)
}
/* Fetch failed, or already fetched but failed to load? */
if (guc_fw->guc_fw_fetch_status != GUC_FIRMWARE_SUCCESS) {
if (guc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS) {
err = -EIO;
goto fail;
} else if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL) {
} else if (guc_fw->load_status == INTEL_UC_FIRMWARE_FAIL) {
err = -ENOEXEC;
goto fail;
}
@@ -486,11 +478,14 @@ int intel_guc_setup(struct drm_i915_private *dev_priv)
guc_interrupts_release(dev_priv);
gen9_reset_guc_interrupts(dev_priv);
guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
/* We need to notify the guc whenever we change the GGTT */
i915_ggtt_enable_guc(dev_priv);
guc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
intel_uc_fw_status_repr(guc_fw->fetch_status),
intel_uc_fw_status_repr(guc_fw->load_status));
err = i915_guc_submission_init(dev_priv);
if (err)
@@ -511,6 +506,7 @@ int intel_guc_setup(struct drm_i915_private *dev_priv)
if (err)
goto fail;
intel_huc_load(dev_priv);
err = guc_ucode_xfer(dev_priv);
if (!err)
break;
@@ -522,11 +518,13 @@ int intel_guc_setup(struct drm_i915_private *dev_priv)
"retry %d more time(s)\n", err, retries);
}
guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;
guc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS;
DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
intel_uc_fw_status_repr(guc_fw->fetch_status),
intel_uc_fw_status_repr(guc_fw->load_status));
intel_guc_auth_huc(dev_priv);
if (i915.enable_guc_submission) {
if (i915.guc_log_level >= 0)
@@ -541,12 +539,13 @@ int intel_guc_setup(struct drm_i915_private *dev_priv)
return 0;
fail:
if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
if (guc_fw->load_status == INTEL_UC_FIRMWARE_PENDING)
guc_fw->load_status = INTEL_UC_FIRMWARE_FAIL;
guc_interrupts_release(dev_priv);
i915_guc_submission_disable(dev_priv);
i915_guc_submission_fini(dev_priv);
i915_ggtt_disable_guc(dev_priv);
/*
* We've failed to load the firmware :(
@@ -587,93 +586,108 @@ fail:
return ret;
}
static void guc_fw_fetch(struct drm_i915_private *dev_priv,
struct intel_guc_fw *guc_fw)
void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
struct intel_uc_fw *uc_fw)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
struct drm_i915_gem_object *obj;
const struct firmware *fw = NULL;
struct guc_css_header *css;
struct uc_css_header *css;
size_t size;
int err;
DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n",
intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
DRM_DEBUG_DRIVER("before requesting firmware: uC fw fetch status %s\n",
intel_uc_fw_status_repr(uc_fw->fetch_status));
err = request_firmware(&fw, guc_fw->guc_fw_path, &pdev->dev);
err = request_firmware(&fw, uc_fw->path, &pdev->dev);
if (err)
goto fail;
if (!fw)
goto fail;
DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n",
guc_fw->guc_fw_path, fw);
DRM_DEBUG_DRIVER("fetch uC fw from %s succeeded, fw %p\n",
uc_fw->path, fw);
/* Check the size of the blob before examining buffer contents */
if (fw->size < sizeof(struct guc_css_header)) {
if (fw->size < sizeof(struct uc_css_header)) {
DRM_NOTE("Firmware header is missing\n");
goto fail;
}
css = (struct guc_css_header *)fw->data;
css = (struct uc_css_header *)fw->data;
/* Firmware bits always start from header */
guc_fw->header_offset = 0;
guc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
uc_fw->header_offset = 0;
uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
if (guc_fw->header_size != sizeof(struct guc_css_header)) {
if (uc_fw->header_size != sizeof(struct uc_css_header)) {
DRM_NOTE("CSS header definition mismatch\n");
goto fail;
}
/* then, uCode */
guc_fw->ucode_offset = guc_fw->header_offset + guc_fw->header_size;
guc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
/* now RSA */
if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
DRM_NOTE("RSA key size is bad\n");
goto fail;
}
guc_fw->rsa_offset = guc_fw->ucode_offset + guc_fw->ucode_size;
guc_fw->rsa_size = css->key_size_dw * sizeof(u32);
uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;
uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
/* At the least, the firmware must contain the header, uCode and RSA key; check the combined size of all three. */
size = guc_fw->header_size + guc_fw->ucode_size + guc_fw->rsa_size;
size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
if (fw->size < size) {
DRM_NOTE("Missing firmware components\n");
goto fail;
}
/* Header and uCode will be loaded to WOPCM. Size of the two. */
size = guc_fw->header_size + guc_fw->ucode_size;
if (size > guc_wopcm_size(dev_priv)) {
DRM_NOTE("Firmware is too large to fit in WOPCM\n");
goto fail;
}
/*
* The GuC firmware image has the version number embedded at a well-known
* offset within the firmware blob; note that major / minor version are
* TWO bytes each (i.e. u16), although all pointers and offsets are defined
* in terms of bytes (u8).
*/
guc_fw->guc_fw_major_found = css->guc_sw_version >> 16;
guc_fw->guc_fw_minor_found = css->guc_sw_version & 0xFFFF;
switch (uc_fw->fw) {
case INTEL_UC_FW_TYPE_GUC:
/* Header and uCode will be loaded to WOPCM. Size of the two. */
size = uc_fw->header_size + uc_fw->ucode_size;
if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted ||
guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) {
DRM_NOTE("GuC firmware version %d.%d, required %d.%d\n",
guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
/* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
if (size > intel_guc_wopcm_size(dev_priv)) {
DRM_ERROR("Firmware is too large to fit in WOPCM\n");
goto fail;
}
uc_fw->major_ver_found = css->guc.sw_version >> 16;
uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF;
break;
case INTEL_UC_FW_TYPE_HUC:
uc_fw->major_ver_found = css->huc.sw_version >> 16;
uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF;
break;
default:
DRM_ERROR("Unknown firmware type %d\n", uc_fw->fw);
err = -ENOEXEC;
goto fail;
}
if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
DRM_NOTE("uC firmware version %d.%d, required %d.%d\n",
uc_fw->major_ver_found, uc_fw->minor_ver_found,
uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
err = -ENOEXEC;
goto fail;
}
DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
uc_fw->major_ver_found, uc_fw->minor_ver_found,
uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
mutex_lock(&dev_priv->drm.struct_mutex);
obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
@@ -683,31 +697,31 @@ static void guc_fw_fetch(struct drm_i915_private *dev_priv,
goto fail;
}
uc_fw->obj = obj;
uc_fw->size = fw->size;
DRM_DEBUG_DRIVER("GuC fw fetch status SUCCESS, obj %p\n",
guc_fw->guc_fw_obj);
DRM_DEBUG_DRIVER("uC fw fetch status SUCCESS, obj %p\n",
uc_fw->obj);
release_firmware(fw);
uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
return;
fail:
DRM_WARN("Failed to fetch valid GuC firmware from %s (error %d)\n",
guc_fw->guc_fw_path, err);
DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n",
err, fw, guc_fw->guc_fw_obj);
DRM_WARN("Failed to fetch valid uC firmware from %s (error %d)\n",
uc_fw->path, err);
DRM_DEBUG_DRIVER("uC fw fetch status FAIL; err %d, fw %p, obj %p\n",
err, fw, uc_fw->obj);
mutex_lock(&dev_priv->drm.struct_mutex);
obj = uc_fw->obj;
if (obj)
i915_gem_object_put(obj);
uc_fw->obj = NULL;
mutex_unlock(&dev_priv->drm.struct_mutex);
release_firmware(fw); /* OK even if fw is NULL */
uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
}
/**
@@ -721,7 +735,7 @@ fail:
*/
void intel_guc_init(struct drm_i915_private *dev_priv)
{
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
const char *fw_path;
if (!HAS_GUC(dev_priv)) {
@@ -739,23 +753,23 @@ void intel_guc_init(struct drm_i915_private *dev_priv)
fw_path = NULL;
} else if (IS_SKYLAKE(dev_priv)) {
fw_path = I915_SKL_GUC_UCODE;
guc_fw->major_ver_wanted = SKL_FW_MAJOR;
guc_fw->minor_ver_wanted = SKL_FW_MINOR;
} else if (IS_BROXTON(dev_priv)) {
fw_path = I915_BXT_GUC_UCODE;
guc_fw->major_ver_wanted = BXT_FW_MAJOR;
guc_fw->minor_ver_wanted = BXT_FW_MINOR;
} else if (IS_KABYLAKE(dev_priv)) {
fw_path = I915_KBL_GUC_UCODE;
guc_fw->major_ver_wanted = KBL_FW_MAJOR;
guc_fw->minor_ver_wanted = KBL_FW_MINOR;
} else {
fw_path = ""; /* unknown device */
}
guc_fw->path = fw_path;
guc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
guc_fw->load_status = INTEL_UC_FIRMWARE_NONE;
/* Early (and silent) return if GuC loading is disabled */
if (!i915.enable_guc_loading)
@@ -765,9 +779,9 @@ void intel_guc_init(struct drm_i915_private *dev_priv)
if (*fw_path == '\0')
return;
guc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
intel_uc_fw_fetch(dev_priv, guc_fw);
/* status must now be FAIL or SUCCESS */
}
@@ -777,17 +791,17 @@ void intel_guc_init(struct drm_i915_private *dev_priv)
*/
void intel_guc_fini(struct drm_i915_private *dev_priv)
{
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
mutex_lock(&dev_priv->drm.struct_mutex);
guc_interrupts_release(dev_priv);
i915_guc_submission_disable(dev_priv);
i915_guc_submission_fini(dev_priv);
if (guc_fw->obj)
i915_gem_object_put(guc_fw->obj);
guc_fw->obj = NULL;
mutex_unlock(&dev_priv->drm.struct_mutex);
guc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
}


@@ -0,0 +1,658 @@
/*
* Copyright © 2014-2017 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <linux/debugfs.h>
#include <linux/relay.h>
#include "i915_drv.h"
static void guc_log_capture_logs(struct intel_guc *guc);
/**
* DOC: GuC firmware log
*
* The firmware log is enabled by setting i915.guc_log_level to a non-negative
* level. Log data is printed out by reading the debugfs file i915_guc_log_dump.
* Reading from i915_guc_load_status will print out the firmware loading status
* and scratch register values.
*
*/
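/*
 * Illustration (not part of i915 itself): a minimal userspace reader for the
 * log dump, assuming debugfs is mounted at its default location and the
 * device is DRM minor 0.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/debug/dri/0/i915_guc_log_dump", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);	/* dump the log to stdout */
	close(fd);
	return 0;
}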
static int guc_log_flush_complete(struct intel_guc *guc)
{
u32 action[] = {
INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE
};
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
static int guc_log_flush(struct intel_guc *guc)
{
u32 action[] = {
INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH,
0
};
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
static int guc_log_control(struct intel_guc *guc, u32 control_val)
{
u32 action[] = {
INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
control_val
};
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
/*
* Sub buffer switch callback. Called whenever relay has to switch to a new
* sub buffer, relay stays on the same sub buffer if 0 is returned.
*/
static int subbuf_start_callback(struct rchan_buf *buf,
void *subbuf,
void *prev_subbuf,
size_t prev_padding)
{
/* Use no-overwrite mode by default, where relay will stop accepting
* new data if there are no empty sub buffers left.
* There is no strict synchronization enforced by relay between Consumer
* and Producer. In overwrite mode, there is a possibility of getting
* inconsistent/garbled data, the producer could be writing on to the
* same sub buffer from which Consumer is reading. This can't be avoided
* unless Consumer is fast enough and can always run in tandem with
* Producer.
*/
if (relay_buf_full(buf))
return 0;
return 1;
}
/*
* file_create() callback. Creates relay file in debugfs.
*/
static struct dentry *create_buf_file_callback(const char *filename,
struct dentry *parent,
umode_t mode,
struct rchan_buf *buf,
int *is_global)
{
struct dentry *buf_file;
/* This is to enable the use of a single buffer for the relay channel and
* correspondingly have a single file exposed to User, through which
* it can collect the logs in order without any post-processing.
* Need to set 'is_global' even if parent is NULL for early logging.
*/
*is_global = 1;
if (!parent)
return NULL;
/* Not using the channel filename passed as an argument, since for each
* channel relay appends the corresponding CPU number to the filename
* passed in relay_open(). This should be fine as relay just needs a
* dentry of the file associated with the channel buffer and that file's
* name need not be the same as the filename passed as an argument.
*/
buf_file = debugfs_create_file("guc_log", mode,
parent, buf, &relay_file_operations);
return buf_file;
}
/*
* file_remove() default callback. Removes relay file in debugfs.
*/
static int remove_buf_file_callback(struct dentry *dentry)
{
debugfs_remove(dentry);
return 0;
}
/* relay channel callbacks */
static struct rchan_callbacks relay_callbacks = {
.subbuf_start = subbuf_start_callback,
.create_buf_file = create_buf_file_callback,
.remove_buf_file = remove_buf_file_callback,
};
static void guc_log_remove_relay_file(struct intel_guc *guc)
{
relay_close(guc->log.relay_chan);
}
static int guc_log_create_relay_channel(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct rchan *guc_log_relay_chan;
size_t n_subbufs, subbuf_size;
/* Keep the size of sub buffers same as shared log buffer */
subbuf_size = guc->log.vma->obj->base.size;
/* Store up to 8 snapshots, which is large enough to buffer sufficient
* boot time logs and provides enough leeway to User, in terms of
* latency, for consuming the logs from relay. Also doesn't take
* up too much memory.
*/
n_subbufs = 8;
guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
n_subbufs, &relay_callbacks, dev_priv);
if (!guc_log_relay_chan) {
DRM_ERROR("Couldn't create relay chan for GuC logging\n");
return -ENOMEM;
}
GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
guc->log.relay_chan = guc_log_relay_chan;
return 0;
}
static int guc_log_create_relay_file(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct dentry *log_dir;
int ret;
/* For now create the log file in /sys/kernel/debug/dri/0 dir */
log_dir = dev_priv->drm.primary->debugfs_root;
/* If the /sys/kernel/debug/dri/0 location does not exist, then debugfs is
* not mounted and so we can't create the relay file.
* The relay API seems to fit well with debugfs only; to make use of relay
* there are 3 requirements, which can be met for a debugfs file only in a
* straightforward/clean manner :-
* i) Need the associated dentry pointer of the file, while opening the
* relay channel.
* ii) Should be able to use 'relay_file_operations' fops for the file.
* iii) Set the 'i_private' field of file's inode to the pointer of
* relay channel buffer.
*/
if (!log_dir) {
DRM_ERROR("Debugfs dir not available yet for GuC log file\n");
return -ENODEV;
}
ret = relay_late_setup_files(guc->log.relay_chan, "guc_log", log_dir);
if (ret) {
DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
return ret;
}
return 0;
}
static void guc_move_to_next_buf(struct intel_guc *guc)
{
/* Make sure the updates made in the sub buffer are visible when
* Consumer sees the following update to offset inside the sub buffer.
*/
smp_wmb();
/* All data has been written, so now move the offset of sub buffer. */
relay_reserve(guc->log.relay_chan, guc->log.vma->obj->base.size);
/* Switch to the next sub buffer */
relay_flush(guc->log.relay_chan);
}
static void *guc_get_write_buffer(struct intel_guc *guc)
{
if (!guc->log.relay_chan)
return NULL;
/* Just get the base address of a new sub buffer and copy data into it
* ourselves. NULL will be returned in no-overwrite mode, if all sub
* buffers are full. Could have used the relay_write() to indirectly
* copy the data, but that would have been a bit convoluted, as we need to
* write to only certain locations inside a sub buffer, which cannot be
* done without using relay_reserve() along with relay_write(). So it's
* better to use relay_reserve() alone.
*/
return relay_reserve(guc->log.relay_chan, 0);
}
static bool guc_check_log_buf_overflow(struct intel_guc *guc,
enum guc_log_buffer_type type,
unsigned int full_cnt)
{
unsigned int prev_full_cnt = guc->log.prev_overflow_count[type];
bool overflow = false;
if (full_cnt != prev_full_cnt) {
overflow = true;
guc->log.prev_overflow_count[type] = full_cnt;
guc->log.total_overflow_count[type] += full_cnt - prev_full_cnt;
if (full_cnt < prev_full_cnt) {
/* buffer_full_cnt is a 4 bit counter */
guc->log.total_overflow_count[type] += 16;
}
DRM_ERROR_RATELIMITED("GuC log buffer overflow\n");
}
return overflow;
}
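/*
 * Worked example (values assumed for illustration): buffer_full_cnt is a
 * 4-bit counter, so it wraps 15 -> 0. With prev_full_cnt = 14 and a newly
 * sampled full_cnt = 2, the unsigned delta (2 - 14) underflows; the +16
 * correction above restores the true count of 4 overflows since the last
 * snapshot.
 */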
static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
{
switch (type) {
case GUC_ISR_LOG_BUFFER:
return (GUC_LOG_ISR_PAGES + 1) * PAGE_SIZE;
case GUC_DPC_LOG_BUFFER:
return (GUC_LOG_DPC_PAGES + 1) * PAGE_SIZE;
case GUC_CRASH_DUMP_LOG_BUFFER:
return (GUC_LOG_CRASH_PAGES + 1) * PAGE_SIZE;
default:
MISSING_CASE(type);
}
return 0;
}
static void guc_read_update_log_buffer(struct intel_guc *guc)
{
unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
struct guc_log_buffer_state log_buf_state_local;
enum guc_log_buffer_type type;
void *src_data, *dst_data;
bool new_overflow;
if (WARN_ON(!guc->log.buf_addr))
return;
/* Get the pointer to shared GuC log buffer */
log_buf_state = src_data = guc->log.buf_addr;
/* Get the pointer to local buffer to store the logs */
log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);
/* Actual logs are present from the 2nd page */
src_data += PAGE_SIZE;
dst_data += PAGE_SIZE;
for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
/* Make a copy of the state structure, inside GuC log buffer
* (which is uncached mapped), on the stack to avoid reading
* from it multiple times.
*/
memcpy(&log_buf_state_local, log_buf_state,
sizeof(struct guc_log_buffer_state));
buffer_size = guc_get_log_buffer_size(type);
read_offset = log_buf_state_local.read_ptr;
write_offset = log_buf_state_local.sampled_write_ptr;
full_cnt = log_buf_state_local.buffer_full_cnt;
/* Bookkeeping stuff */
guc->log.flush_count[type] += log_buf_state_local.flush_to_file;
new_overflow = guc_check_log_buf_overflow(guc, type, full_cnt);
/* Update the state of shared log buffer */
log_buf_state->read_ptr = write_offset;
log_buf_state->flush_to_file = 0;
log_buf_state++;
if (unlikely(!log_buf_snapshot_state))
continue;
/* First copy the state structure in snapshot buffer */
memcpy(log_buf_snapshot_state, &log_buf_state_local,
sizeof(struct guc_log_buffer_state));
/* The write pointer could have been updated by the GuC firmware,
* after sending the flush interrupt to Host; for consistency
* set the write pointer value to the same value as sampled_write_ptr
* in the snapshot buffer.
*/
log_buf_snapshot_state->write_ptr = write_offset;
log_buf_snapshot_state++;
/* Now copy the actual logs. */
if (unlikely(new_overflow)) {
/* copy the whole buffer in case of overflow */
read_offset = 0;
write_offset = buffer_size;
} else if (unlikely((read_offset > buffer_size) ||
(write_offset > buffer_size))) {
DRM_ERROR("invalid log buffer state\n");
/* copy whole buffer as offsets are unreliable */
read_offset = 0;
write_offset = buffer_size;
}
/* Just copy the newly written data */
if (read_offset > write_offset) {
i915_memcpy_from_wc(dst_data, src_data, write_offset);
bytes_to_copy = buffer_size - read_offset;
} else {
bytes_to_copy = write_offset - read_offset;
}
i915_memcpy_from_wc(dst_data + read_offset,
src_data + read_offset, bytes_to_copy);
src_data += buffer_size;
dst_data += buffer_size;
}
if (log_buf_snapshot_state)
guc_move_to_next_buf(guc);
else {
/* Use a rate-limited message to avoid a deluge, as the logs might be
* getting consumed by the User at a slow rate.
*/
DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
guc->log.capture_miss_count++;
}
}
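/*
 * Sketch (hypothetical helper, not part of the patch): the two-segment copy
 * performed above, treating the log buffer as a circular buffer with the
 * given read/write offsets.
 */
static inline void copy_new_log_data(void *dst, const void *src,
				     unsigned int read_offset,
				     unsigned int write_offset,
				     unsigned int buffer_size)
{
	if (read_offset > write_offset) {
		/* Wrapped: copy [0, write) first, then [read, end) */
		i915_memcpy_from_wc(dst, src, write_offset);
		i915_memcpy_from_wc(dst + read_offset, src + read_offset,
				    buffer_size - read_offset);
	} else {
		/* No wrap: copy just the newly written span [read, write) */
		i915_memcpy_from_wc(dst + read_offset, src + read_offset,
				    write_offset - read_offset);
	}
}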
static void guc_log_cleanup(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
lockdep_assert_held(&dev_priv->drm.struct_mutex);
/* First disable the flush interrupt */
gen9_disable_guc_interrupts(dev_priv);
if (guc->log.flush_wq)
destroy_workqueue(guc->log.flush_wq);
guc->log.flush_wq = NULL;
if (guc->log.relay_chan)
guc_log_remove_relay_file(guc);
guc->log.relay_chan = NULL;
if (guc->log.buf_addr)
i915_gem_object_unpin_map(guc->log.vma->obj);
guc->log.buf_addr = NULL;
}
static void capture_logs_work(struct work_struct *work)
{
struct intel_guc *guc =
container_of(work, struct intel_guc, log.flush_work);
guc_log_capture_logs(guc);
}
static int guc_log_create_extras(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
void *vaddr;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
/* Nothing to do */
if (i915.guc_log_level < 0)
return 0;
if (!guc->log.buf_addr) {
/* Create a WC (Uncached for read) vmalloc mapping of log
* buffer pages, so that we can directly get the data
* (up-to-date) from memory.
*/
vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
return ret;
}
guc->log.buf_addr = vaddr;
}
if (!guc->log.relay_chan) {
/* Create a relay channel, so that we have buffers for storing
* the GuC firmware logs, the channel will be linked with a file
* later on when debugfs is registered.
*/
ret = guc_log_create_relay_channel(guc);
if (ret)
return ret;
}
if (!guc->log.flush_wq) {
INIT_WORK(&guc->log.flush_work, capture_logs_work);
/*
* GuC log buffer flush work item has to do register access to
* send the ack to GuC and this work item, if not synced before
* suspend, can potentially get executed after the GFX device is
* suspended.
* By marking the WQ as freezable, we don't have to bother about
* flushing of this work item from the suspend hooks, the pending
* work item if any will be either executed before the suspend
* or scheduled later on resume. This way the handling of work
* item can be kept same between system suspend & rpm suspend.
*/
guc->log.flush_wq = alloc_ordered_workqueue("i915-guc_log",
WQ_HIGHPRI | WQ_FREEZABLE);
if (guc->log.flush_wq == NULL) {
DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
return -ENOMEM;
}
}
return 0;
}
void intel_guc_log_create(struct intel_guc *guc)
{
struct i915_vma *vma;
unsigned long offset;
uint32_t size, flags;
if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
/* The first page is to save log buffer state. Allocate one
* extra page for others in case of overlap */
size = (1 + GUC_LOG_DPC_PAGES + 1 +
GUC_LOG_ISR_PAGES + 1 +
GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
vma = guc->log.vma;
if (!vma) {
/* We require SSE 4.1 for fast reads from the GuC log buffer and
* it should be present on the chipsets supporting GuC based
* submissions.
*/
if (WARN_ON(!i915_has_memcpy_from_wc())) {
/* logging will not be enabled */
i915.guc_log_level = -1;
return;
}
vma = intel_guc_allocate_vma(guc, size);
if (IS_ERR(vma)) {
/* logging will be off */
i915.guc_log_level = -1;
return;
}
guc->log.vma = vma;
if (guc_log_create_extras(guc)) {
guc_log_cleanup(guc);
i915_vma_unpin_and_release(&guc->log.vma);
i915.guc_log_level = -1;
return;
}
}
/* each allocated unit is a page */
flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
(GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
(GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
}
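/*
 * For reference, the buffer allocated above is, in pages:
 *   1 (shared state page)
 *   + (GUC_LOG_DPC_PAGES + 1) + (GUC_LOG_ISR_PAGES + 1)
 *   + (GUC_LOG_CRASH_PAGES + 1)
 * which matches the per-type sizes returned by guc_get_log_buffer_size(),
 * with the actual logs starting after the first (state) page.
 */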
static int guc_log_late_setup(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
if (i915.guc_log_level < 0)
return -EINVAL;
/* If log_level was set as -1 at boot time, then setup needed to
* handle log buffer flush interrupts would not have been done yet,
* so do that now.
*/
ret = guc_log_create_extras(guc);
if (ret)
goto err;
ret = guc_log_create_relay_file(guc);
if (ret)
goto err;
return 0;
err:
guc_log_cleanup(guc);
/* logging will remain off */
i915.guc_log_level = -1;
return ret;
}
static void guc_log_capture_logs(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
guc_read_update_log_buffer(guc);
/* Generally device is expected to be active only at this
* time, so get/put should be really quick.
*/
intel_runtime_pm_get(dev_priv);
guc_log_flush_complete(guc);
intel_runtime_pm_put(dev_priv);
}
static void guc_flush_logs(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
if (!i915.enable_guc_submission || (i915.guc_log_level < 0))
return;
/* First disable the interrupts, they will be re-enabled afterwards */
gen9_disable_guc_interrupts(dev_priv);
/* Before initiating the forceful flush, wait for any pending/ongoing
* flush to complete otherwise forceful flush may not actually happen.
*/
flush_work(&guc->log.flush_work);
/* Ask GuC to update the log buffer state */
guc_log_flush(guc);
/* GuC would have updated log buffer by now, so capture it */
guc_log_capture_logs(guc);
}
int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
{
struct intel_guc *guc = &dev_priv->guc;
union guc_log_control log_param;
int ret;
log_param.value = control_val;
if (log_param.verbosity < GUC_LOG_VERBOSITY_MIN ||
log_param.verbosity > GUC_LOG_VERBOSITY_MAX)
return -EINVAL;
/* This combination doesn't make sense & won't have any effect */
if (!log_param.logging_enabled && (i915.guc_log_level < 0))
return 0;
ret = guc_log_control(guc, log_param.value);
if (ret < 0) {
DRM_DEBUG_DRIVER("guc_logging_control action failed %d\n", ret);
return ret;
}
i915.guc_log_level = log_param.verbosity;
/* If log_level was set as -1 at boot time, then the relay channel file
* wouldn't have been created by now and interrupts also would not have
* been enabled.
*/
if (!dev_priv->guc.log.relay_chan) {
ret = guc_log_late_setup(guc);
if (!ret)
gen9_enable_guc_interrupts(dev_priv);
} else if (!log_param.logging_enabled) {
/* Once logging is disabled, GuC won't generate logs & send an
* interrupt. But there could be some data in the log buffer
* which is yet to be captured. So request GuC to update the log
* buffer state and then collect the left over logs.
*/
guc_flush_logs(guc);
/* As logging is disabled, update log level to reflect that */
i915.guc_log_level = -1;
} else {
/* In case interrupts were disabled, enable them now */
gen9_enable_guc_interrupts(dev_priv);
}
return ret;
}
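/*
 * Sketch of the control-value layout consumed above (the union lives in the
 * GuC interface definitions; the exact bit positions shown here are an
 * assumption for illustration only):
 *
 *	union guc_log_control {
 *		struct {
 *			u32 logging_enabled:1;
 *			u32 reserved1:3;
 *			u32 verbosity:4;
 *			u32 reserved2:24;
 *		};
 *		u32 value;
 *	};
 */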
void i915_guc_log_register(struct drm_i915_private *dev_priv)
{
if (!i915.enable_guc_submission)
return;
mutex_lock(&dev_priv->drm.struct_mutex);
guc_log_late_setup(&dev_priv->guc);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
void i915_guc_log_unregister(struct drm_i915_private *dev_priv)
{
if (!i915.enable_guc_submission)
return;
mutex_lock(&dev_priv->drm.struct_mutex);
guc_log_cleanup(&dev_priv->guc);
mutex_unlock(&dev_priv->drm.struct_mutex);
}


@@ -0,0 +1,338 @@
/*
* Copyright © 2016-2017 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <linux/firmware.h>
#include "i915_drv.h"
#include "intel_uc.h"
/**
* DOC: HuC Firmware
*
* Motivation:
* GEN9 introduces a new dedicated firmware for usage in media HEVC (High
* Efficiency Video Coding) operations. Userspace can use the firmware
* capabilities by adding HuC specific commands to batch buffers.
*
* Implementation:
* The same firmware loader is used as for the GuC. However, the actual
* loading to HW is deferred until GEM initialization is done.
*
* Note that HuC firmware loading must be done before GuC loading.
*/
#define BXT_HUC_FW_MAJOR 01
#define BXT_HUC_FW_MINOR 07
#define BXT_BLD_NUM 1398
#define SKL_HUC_FW_MAJOR 01
#define SKL_HUC_FW_MINOR 07
#define SKL_BLD_NUM 1398
#define KBL_HUC_FW_MAJOR 02
#define KBL_HUC_FW_MINOR 00
#define KBL_BLD_NUM 1810
#define HUC_FW_PATH(platform, major, minor, bld_num) \
"i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \
__stringify(minor) "_" __stringify(bld_num) ".bin"
#define I915_SKL_HUC_UCODE HUC_FW_PATH(skl, SKL_HUC_FW_MAJOR, \
SKL_HUC_FW_MINOR, SKL_BLD_NUM)
MODULE_FIRMWARE(I915_SKL_HUC_UCODE);
#define I915_BXT_HUC_UCODE HUC_FW_PATH(bxt, BXT_HUC_FW_MAJOR, \
BXT_HUC_FW_MINOR, BXT_BLD_NUM)
MODULE_FIRMWARE(I915_BXT_HUC_UCODE);
#define I915_KBL_HUC_UCODE HUC_FW_PATH(kbl, KBL_HUC_FW_MAJOR, \
KBL_HUC_FW_MINOR, KBL_BLD_NUM)
MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
/**
* huc_ucode_xfer() - DMA's the firmware
* @dev_priv: the drm_i915_private device
*
* Transfer the firmware image to RAM for execution by the microcontroller.
*
* Return: 0 on success, non-zero on failure
*/
static int huc_ucode_xfer(struct drm_i915_private *dev_priv)
{
struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
struct i915_vma *vma;
unsigned long offset = 0;
u32 size;
int ret;
ret = i915_gem_object_set_to_gtt_domain(huc_fw->obj, false);
if (ret) {
DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
return ret;
}
vma = i915_gem_object_ggtt_pin(huc_fw->obj, NULL, 0, 0,
PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
if (IS_ERR(vma)) {
DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma));
return PTR_ERR(vma);
}
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* init WOPCM */
I915_WRITE(GUC_WOPCM_SIZE, intel_guc_wopcm_size(dev_priv));
I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE |
HUC_LOADING_AGENT_GUC);
/* Set the source address for the uCode */
offset = guc_ggtt_offset(vma) + huc_fw->header_offset;
I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
/* Hardware doesn't look at destination address for HuC. Set it to 0,
* but still program the correct address space.
*/
I915_WRITE(DMA_ADDR_1_LOW, 0);
I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
size = huc_fw->header_size + huc_fw->ucode_size;
I915_WRITE(DMA_COPY_SIZE, size);
/* Start the DMA */
I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));
/* Wait for DMA to finish */
ret = wait_for((I915_READ(DMA_CTRL) & START_DMA) == 0, 100);
DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret);
/* Disable the bits once DMA is over */
I915_WRITE(DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
/*
* We keep the object pages for reuse during resume. But we can unpin it
* now that DMA has completed, so it doesn't continue to take up space.
*/
i915_vma_unpin(vma);
return ret;
}
/**
* intel_huc_init() - initiate HuC firmware loading request
* @dev_priv: the drm_i915_private device
*
* Called early during driver load, but after GEM is initialised. The loading
* will continue only when the driver explicitly specifies a firmware name and
* version. All other cases are considered INTEL_UC_FIRMWARE_NONE, either because
* the HW is not capable or the driver does not yet support it, and no error
* message is printed for INTEL_UC_FIRMWARE_NONE cases.
*
* The DMA-copying to HW is done later when intel_huc_load() is called.
*/
void intel_huc_init(struct drm_i915_private *dev_priv)
{
struct intel_huc *huc = &dev_priv->huc;
struct intel_uc_fw *huc_fw = &huc->fw;
const char *fw_path = NULL;
huc_fw->path = NULL;
huc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
huc_fw->load_status = INTEL_UC_FIRMWARE_NONE;
huc_fw->fw = INTEL_UC_FW_TYPE_HUC;
if (!HAS_HUC_UCODE(dev_priv))
return;
if (IS_SKYLAKE(dev_priv)) {
fw_path = I915_SKL_HUC_UCODE;
huc_fw->major_ver_wanted = SKL_HUC_FW_MAJOR;
huc_fw->minor_ver_wanted = SKL_HUC_FW_MINOR;
} else if (IS_BROXTON(dev_priv)) {
fw_path = I915_BXT_HUC_UCODE;
huc_fw->major_ver_wanted = BXT_HUC_FW_MAJOR;
huc_fw->minor_ver_wanted = BXT_HUC_FW_MINOR;
} else if (IS_KABYLAKE(dev_priv)) {
fw_path = I915_KBL_HUC_UCODE;
huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR;
huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR;
}
huc_fw->path = fw_path;
huc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
DRM_DEBUG_DRIVER("HuC firmware pending, path %s\n", fw_path);
WARN(huc_fw->path == NULL, "HuC present but no fw path\n");
intel_uc_fw_fetch(dev_priv, huc_fw);
}
/**
* intel_huc_load() - load HuC uCode to device
* @dev_priv: the drm_i915_private device
*
* Called from guc_setup() during driver loading and also after a GPU reset.
* Note that HuC loading must be done before GuC loading.
*
* The firmware image should have already been fetched into memory by the
* earlier call to intel_huc_init(), so here we need only check that the
* fetch succeeded, and then transfer the image to the h/w.
*
* Return: non-zero code on error
*/
int intel_huc_load(struct drm_i915_private *dev_priv)
{
struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
int err;
if (huc_fw->fetch_status == INTEL_UC_FIRMWARE_NONE)
return 0;
DRM_DEBUG_DRIVER("%s fw status: fetch %s, load %s\n",
huc_fw->path,
intel_uc_fw_status_repr(huc_fw->fetch_status),
intel_uc_fw_status_repr(huc_fw->load_status));
if (huc_fw->fetch_status == INTEL_UC_FIRMWARE_SUCCESS &&
huc_fw->load_status == INTEL_UC_FIRMWARE_FAIL)
return -ENOEXEC;
huc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
switch (huc_fw->fetch_status) {
case INTEL_UC_FIRMWARE_FAIL:
/* something went wrong :( */
err = -EIO;
goto fail;
case INTEL_UC_FIRMWARE_NONE:
case INTEL_UC_FIRMWARE_PENDING:
default:
/* "can't happen" */
WARN_ONCE(1, "HuC fw %s invalid fetch_status %s [%d]\n",
huc_fw->path,
intel_uc_fw_status_repr(huc_fw->fetch_status),
huc_fw->fetch_status);
err = -ENXIO;
goto fail;
case INTEL_UC_FIRMWARE_SUCCESS:
break;
}
err = huc_ucode_xfer(dev_priv);
if (err)
goto fail;
huc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS;
DRM_DEBUG_DRIVER("%s fw status: fetch %s, load %s\n",
huc_fw->path,
intel_uc_fw_status_repr(huc_fw->fetch_status),
intel_uc_fw_status_repr(huc_fw->load_status));
return 0;
fail:
if (huc_fw->load_status == INTEL_UC_FIRMWARE_PENDING)
huc_fw->load_status = INTEL_UC_FIRMWARE_FAIL;
DRM_ERROR("Failed to complete HuC uCode load with ret %d\n", err);
return err;
}
/**
* intel_huc_fini() - clean up resources allocated for HuC
* @dev_priv: the drm_i915_private device
*
* Cleans up by releasing the huc firmware GEM obj.
*/
void intel_huc_fini(struct drm_i915_private *dev_priv)
{
struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
mutex_lock(&dev_priv->drm.struct_mutex);
if (huc_fw->obj)
i915_gem_object_put(huc_fw->obj);
huc_fw->obj = NULL;
mutex_unlock(&dev_priv->drm.struct_mutex);
huc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
}
/**
* intel_guc_auth_huc() - authenticate ucode
* @dev_priv: the drm_i915_device
*
* Triggers a HuC fw authentication request to the GuC via intel_guc_action_
* authenticate_huc interface.
*/
void intel_guc_auth_huc(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
struct intel_huc *huc = &dev_priv->huc;
struct i915_vma *vma;
int ret;
u32 data[2];
if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
return;
vma = i915_gem_object_ggtt_pin(huc->fw.obj, NULL, 0, 0,
PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
if (IS_ERR(vma)) {
DRM_ERROR("failed to pin huc fw object %d\n",
(int)PTR_ERR(vma));
return;
}
/* Specify auth action and where public signature is. */
data[0] = INTEL_GUC_ACTION_AUTHENTICATE_HUC;
data[1] = guc_ggtt_offset(vma) + huc->fw.rsa_offset;
ret = intel_guc_send(guc, data, ARRAY_SIZE(data));
if (ret) {
DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
goto out;
}
/* Check authentication status, it should be done by now */
ret = intel_wait_for_register(dev_priv,
HUC_STATUS2,
HUC_FW_VERIFIED,
HUC_FW_VERIFIED,
50);
if (ret) {
DRM_ERROR("HuC: Authentication failed %d\n", ret);
goto out;
}
out:
i915_vma_unpin(vma);
}
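/*
 * Sketch of the overall ordering implied by the comments above (driver load
 * path; names as used in this series):
 *
 *	intel_huc_init()     - fetch the HuC image into memory
 *	intel_huc_load()     - DMA it into WOPCM (before GuC load)
 *	intel_guc_setup()    - load/boot the GuC
 *	intel_guc_auth_huc() - GuC verifies the HuC RSA signature
 */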


@@ -811,12 +811,6 @@ static int execlists_context_pin(struct intel_engine_cs *engine,
ce->state->obj->mm.dirty = true;
/* Invalidate GuC TLB. */
if (i915.enable_guc_submission) {
struct drm_i915_private *dev_priv = ctx->i915;
I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}
i915_gem_context_get(ctx);
return 0;
@@ -1231,7 +1225,7 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
if (IS_ERR(obj))
return PTR_ERR(obj);
vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -1917,7 +1911,7 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
engine->emit_breadcrumb = gen8_emit_breadcrumb_render;
engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_render_sz;
ret = intel_engine_create_scratch(engine, PAGE_SIZE);
if (ret)
return ret;
@@ -2093,19 +2087,12 @@ static void execlists_init_reg_state(u32 *reg_state,
ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
0);
if (ppgtt && USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
/* 64b PPGTT (48bit canonical)
* PDP0_DESCRIPTOR contains the base address to PML4 and
* other PDP Descriptors are ignored.
*/
ASSIGN_CTX_PML4(ppgtt, reg_state);
} else {
/* 32b PPGTT
* PDP*_DESCRIPTOR contains the base address of space supported.
* With dynamic page allocation, PDPs may not be allocated at
* this point. Point the unallocated PDPs to the scratch page
*/
execlists_update_context_pdps(ppgtt, reg_state);
}
if (engine->id == RCS) {
@@ -2199,7 +2186,8 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
WARN_ON(ce->state);
context_size = round_up(intel_lr_context_size(engine),
I915_GTT_PAGE_SIZE);
/* One extra page as the sharing data between driver and GuC */
context_size += PAGE_SIZE * LRC_PPHWSP_PN;
@@ -2210,7 +2198,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
return PTR_ERR(ctx_obj);
}
vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto error_deref_obj;


@@ -26,7 +26,7 @@
#include "intel_ringbuffer.h"
#define GEN8_LR_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT
/* Execlists regs */
#define RING_ELSP(engine) _MMIO((engine)->mmio_base + 0x230)


@@ -811,8 +811,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0)
return ret;
vma = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);


@@ -560,14 +560,14 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
state = drm_atomic_state_alloc(dev);
if (!state) {
ret = -ENOMEM;
goto unlock;
}
state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
pipe_config = intel_atomic_get_crtc_state(state, crtc);
if (IS_ERR(pipe_config)) {
ret = PTR_ERR(pipe_config);
goto put_state;
}
pipe_config->pch_pfit.force_thru = enable;
@@ -576,10 +576,12 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
pipe_config->base.connectors_changed = true;
ret = drm_atomic_commit(state);
put_state:
drm_atomic_state_put(state);
unlock:
WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
drm_modeset_unlock_all(dev);
}
static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
@@ -613,6 +615,22 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
return 0;
}
static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source *source, u32 *val)
{
if (IS_GEN2(dev_priv))
return i8xx_pipe_crc_ctl_reg(source, val);
else if (INTEL_GEN(dev_priv) < 5)
return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
return ilk_pipe_crc_ctl_reg(source, val);
else
return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
}
static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source source)
@@ -636,17 +654,7 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
return -EIO;
}
ret = get_new_crc_ctl_reg(dev_priv, pipe, &source, &val);
if (ret != 0)
goto out;
@@ -687,7 +695,7 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
POSTING_READ(PIPE_CRC_CTL(pipe));
/* real source -> none transition */
if (!source) {
struct intel_pipe_crc_entry *entries;
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
pipe);
@@ -809,6 +817,11 @@ display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
{
int i;
if (!buf) {
*s = INTEL_PIPE_CRC_SOURCE_NONE;
return 0;
}
for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
if (!strcmp(buf, pipe_crc_sources[i])) {
*s = i;
@@ -937,3 +950,62 @@ void intel_pipe_crc_cleanup(struct drm_minor *minor)
drm_debugfs_remove_files(info_list, 1, minor);
}
}
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
size_t *values_cnt)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum intel_display_power_domain power_domain;
enum intel_pipe_crc_source source;
u32 val = 0; /* shut up gcc */
int ret = 0;
if (display_crc_ctl_parse_source(source_name, &source) < 0) {
DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
return -EINVAL;
}
power_domain = POWER_DOMAIN_PIPE(crtc->index);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
return -EIO;
}
ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val);
if (ret != 0)
goto out;
if (source) {
/*
* When IPS gets enabled, the pipe CRC changes. Since IPS gets
* enabled and disabled dynamically based on package C states,
* user space can't make reliable use of the CRCs, so let's just
* completely disable it.
*/
hsw_disable_ips(intel_crtc);
}
I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
POSTING_READ(PIPE_CRC_CTL(crtc->index));
if (!source) {
if (IS_G4X(dev_priv))
g4x_undo_pipe_scramble_reset(dev_priv, crtc->index);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_undo_pipe_scramble_reset(dev_priv, crtc->index);
else if (IS_HASWELL(dev_priv) && crtc->index == PIPE_A)
hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);
hsw_enable_ips(intel_crtc);
}
pipe_crc->skipped = 0;
*values_cnt = 5;
out:
intel_display_power_put(dev_priv, power_domain);
return ret;
}


@@ -122,13 +122,26 @@ static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp)
{
struct edp_vsc_psr psr_vsc;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
memset(&psr_vsc, 0, sizeof(psr_vsc));
psr_vsc.sdp_header.HB0 = 0;
psr_vsc.sdp_header.HB1 = 0x7;
if (dev_priv->psr.colorimetry_support &&
dev_priv->psr.y_cord_support) {
psr_vsc.sdp_header.HB2 = 0x5;
psr_vsc.sdp_header.HB3 = 0x13;
} else if (dev_priv->psr.y_cord_support) {
psr_vsc.sdp_header.HB2 = 0x4;
psr_vsc.sdp_header.HB3 = 0xe;
} else {
psr_vsc.sdp_header.HB2 = 0x3;
psr_vsc.sdp_header.HB3 = 0xc;
}
intel_psr_write_vsc(intel_dp, &psr_vsc);
}
@@ -196,7 +209,11 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
drm_dp_dpcd_writeb(&intel_dp->aux,
DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
DP_AUX_FRAME_SYNC_ENABLE);
/* Enable ALPM at sink for psr2 */
if (dev_priv->psr.psr2_support && dev_priv->psr.alpm)
drm_dp_dpcd_writeb(&intel_dp->aux,
DP_RECEIVER_ALPM_CONFIG,
DP_ALPM_ENABLE);
if (dev_priv->psr.link_standby)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
@@ -248,7 +265,7 @@ static void vlv_psr_activate(struct intel_dp *intel_dp)
VLV_EDP_PSR_ACTIVE_ENTRY);
}
static void intel_enable_source_psr1(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
@@ -299,14 +316,31 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
val |= EDP_PSR_TP1_TP2_SEL;
I915_WRITE(EDP_PSR_CTL, val);
}
static void intel_enable_source_psr2(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
/*
* Let's respect VBT in case VBT asks for a higher idle_frame value.
* Let's use 6 as the minimum to cover all known cases including the
* off-by-one issue that HW has in some cases. Also there are cases
* where the sink should be able to train with the 5 or 6 idle patterns.
*/
uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
uint32_t val;
val = idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
/* FIXME: selective update is probably totally broken because it doesn't
* mesh at all with our frontbuffer tracking. And the hw alone isn't
* good enough. */
val |= EDP_PSR2_ENABLE |
EDP_SU_TRACK_ENABLE |
EDP_FRAMES_BEFORE_SU_ENTRY;
if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
val |= EDP_PSR2_TP2_TIME_2500;
@@ -320,6 +354,19 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
I915_WRITE(EDP_PSR2_CTL, val);
}
static void hsw_psr_enable_source(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
/* psr1 and psr2 are mutually exclusive.*/
if (dev_priv->psr.psr2_support)
intel_enable_source_psr2(intel_dp);
else
intel_enable_source_psr1(intel_dp);
}
static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -387,6 +434,22 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
/* PSR2 is restricted to work with panel resolutions up to 3200x2000 */
if (intel_crtc->config->pipe_src_w > 3200 ||
intel_crtc->config->pipe_src_h > 2000) {
dev_priv->psr.psr2_support = false;
return false;
}
/*
* FIXME: enable PSR2 only for Y-coordinate PSR2 panels.
* After GTC implementation, remove this restriction.
*/
if (!dev_priv->psr.y_cord_support && dev_priv->psr.psr2_support) {
DRM_DEBUG_KMS("PSR2 disabled, panel does not support Y coordinate\n");
return false;
}
dev_priv->psr.source_ok = true;
return true;
}
@@ -397,7 +460,10 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
if (dev_priv->psr.psr2_support)
WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
else
WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
WARN_ON(dev_priv->psr.active);
lockdep_assert_held(&dev_priv->psr.lock);
@@ -426,6 +492,8 @@ void intel_psr_enable(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
u32 chicken;
if (!HAS_PSR(dev_priv)) {
DRM_DEBUG_KMS("PSR not supported on this platform\n");
@@ -449,26 +517,34 @@ void intel_psr_enable(struct intel_dp *intel_dp)
dev_priv->psr.busy_frontbuffer_bits = 0;
if (HAS_DDI(dev_priv)) {
if (dev_priv->psr.psr2_support) {
skl_psr_setup_su_vsc(intel_dp);
chicken = PSR2_VSC_ENABLE_PROG_HEADER;
if (dev_priv->psr.y_cord_support)
chicken |= PSR2_ADD_VERTICAL_LINE_COUNT;
I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
I915_WRITE(EDP_PSR_DEBUG_CTL,
EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD |
EDP_PSR_DEBUG_MASK_LPSP |
EDP_PSR_DEBUG_MASK_MAX_SLEEP |
EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
} else {
/* set up vsc header for psr1 */
hsw_psr_setup_vsc(intel_dp);
/*
* Per Spec: Avoid continuous PSR exit by masking MEMUP
* and HPD. also mask LPSP to avoid dependency on other
* drivers that might block runtime_pm besides
* preventing other hw tracking issues now we can rely
* on frontbuffer tracking.
*/
I915_WRITE(EDP_PSR_DEBUG_CTL,
EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD |
EDP_PSR_DEBUG_MASK_LPSP);
}
/* Enable PSR on the panel */
hsw_psr_enable_sink(intel_dp);
@@ -544,20 +620,42 @@ static void hsw_psr_disable(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(dev);
if (dev_priv->psr.active) {
i915_reg_t psr_ctl;
u32 psr_status_mask;
if (dev_priv->psr.aux_frame_sync)
drm_dp_dpcd_writeb(&intel_dp->aux,
DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
0);
if (dev_priv->psr.psr2_support) {
psr_ctl = EDP_PSR2_CTL;
psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
I915_WRITE(psr_ctl,
I915_READ(psr_ctl) &
~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
} else {
psr_ctl = EDP_PSR_STATUS_CTL;
psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
I915_WRITE(psr_ctl,
I915_READ(psr_ctl) & ~EDP_PSR_ENABLE);
}
/* Wait till PSR is idle */
if (intel_wait_for_register(dev_priv,
psr_ctl, psr_status_mask, 0,
2000))
DRM_ERROR("Timed out waiting for PSR Idle State\n");
dev_priv->psr.active = false;
} else {
WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
if (dev_priv->psr.psr2_support)
WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
else
WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
}
}
@@ -608,13 +706,24 @@ static void intel_psr_work(struct work_struct *work)
* and be ready for re-enable.
*/
if (HAS_DDI(dev_priv)) {
if (dev_priv->psr.psr2_support) {
if (intel_wait_for_register(dev_priv,
EDP_PSR2_STATUS_CTL,
EDP_PSR2_STATUS_STATE_MASK,
0,
50)) {
DRM_ERROR("Timed out waiting for PSR2 Idle for re-enable\n");
return;
}
} else {
if (intel_wait_for_register(dev_priv,
EDP_PSR_STATUS_CTL,
EDP_PSR_STATUS_STATE_MASK,
0,
50)) {
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
return;
}
}
} else {
if (intel_wait_for_register(dev_priv,
@@ -656,11 +765,19 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv)
return;
if (HAS_DDI(dev_priv)) {
if (dev_priv->psr.aux_frame_sync)
drm_dp_dpcd_writeb(&intel_dp->aux,
DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
0);
if (dev_priv->psr.psr2_support) {
val = I915_READ(EDP_PSR2_CTL);
WARN_ON(!(val & EDP_PSR2_ENABLE));
I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
} else {
val = I915_READ(EDP_PSR_CTL);
WARN_ON(!(val & EDP_PSR_ENABLE));
I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
}
} else {
val = I915_READ(VLV_PSRCTL(pipe));


@@ -1728,7 +1728,7 @@ static int init_status_page(struct intel_engine_cs *engine)
void *vaddr;
int ret;
obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate status page\n");
return PTR_ERR(obj);
@@ -1738,7 +1738,7 @@ static int init_status_page(struct intel_engine_cs *engine)
if (ret)
goto err;
vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
@@ -1769,7 +1769,7 @@ static int init_status_page(struct intel_engine_cs *engine)
engine->status_page.vma = vma;
engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);
DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
engine->name, i915_ggtt_offset(vma));
@@ -1872,7 +1872,7 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
/* mark ring buffers as read-only from GPU side by default */
obj->gt_ro = 1;
vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
if (IS_ERR(vma))
goto err;
@@ -2041,7 +2041,7 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
}
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
ret = intel_ring_pin(ring, I915_GTT_PAGE_SIZE);
if (ret) {
intel_ring_free(ring);
goto error;
@@ -2458,11 +2458,11 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
struct i915_vma *vma;
obj = i915_gem_object_create(dev_priv, PAGE_SIZE);
if (IS_ERR(obj))
goto err;
vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
if (IS_ERR(vma))
goto err_obj;
@@ -2675,7 +2675,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
return ret;
if (INTEL_GEN(dev_priv) >= 6) {
ret = intel_engine_create_scratch(engine, PAGE_SIZE);
if (ret)
return ret;
} else if (HAS_BROKEN_CS_TLB(dev_priv)) {


@@ -273,7 +273,7 @@ skl_update_plane(struct drm_plane *drm_plane,
I915_WRITE(PLANE_CTL(pipe, plane_id), plane_ctl);
I915_WRITE(PLANE_SURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + surf_addr);
POSTING_READ(PLANE_SURF(pipe, plane_id));
}
@@ -458,7 +458,7 @@ vlv_update_plane(struct drm_plane *dplane,
I915_WRITE(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
I915_WRITE(SPCNTR(pipe, plane_id), sprctl);
I915_WRITE(SPSURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
POSTING_READ(SPSURF(pipe, plane_id));
}
@@ -594,7 +594,7 @@ ivb_update_plane(struct drm_plane *plane,
I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
I915_WRITE(SPRSURF(pipe),
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
}
@@ -721,7 +721,7 @@ ilk_update_plane(struct drm_plane *plane,
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
I915_WRITE(DVSSURF(pipe),
intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
POSTING_READ(DVSSURF(pipe));
}


@@ -114,29 +114,3 @@ int intel_guc_sample_forcewake(struct intel_guc *guc)
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
int intel_guc_log_flush_complete(struct intel_guc *guc)
{
u32 action[] = { INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE };
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
int intel_guc_log_flush(struct intel_guc *guc)
{
u32 action[] = {
INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH,
0
};
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
int intel_guc_log_control(struct intel_guc *guc, u32 control_val)
{
u32 action[] = {
INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
control_val
};
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}


@@ -93,29 +93,35 @@ struct i915_guc_client {
uint64_t submissions[I915_NUM_ENGINES];
};
enum intel_uc_fw_status {
INTEL_UC_FIRMWARE_FAIL = -1,
INTEL_UC_FIRMWARE_NONE = 0,
INTEL_UC_FIRMWARE_PENDING,
INTEL_UC_FIRMWARE_SUCCESS
};
enum intel_uc_fw_type {
INTEL_UC_FW_TYPE_GUC,
INTEL_UC_FW_TYPE_HUC
};
/*
* This structure encapsulates all the data needed during the process
* of fetching, caching, and loading the firmware image into the GuC.
*/
struct intel_uc_fw {
const char *path;
size_t size;
struct drm_i915_gem_object *obj;
enum intel_uc_fw_status fetch_status;
enum intel_uc_fw_status load_status;
uint16_t major_ver_wanted;
uint16_t minor_ver_wanted;
uint16_t major_ver_found;
uint16_t minor_ver_found;
enum intel_uc_fw_type fw;
uint32_t header_size;
uint32_t header_offset;
uint32_t rsa_size;
@@ -141,7 +147,7 @@ struct intel_guc_log {
};
struct intel_guc {
struct intel_uc_fw fw;
struct intel_guc_log log;
/* intel_guc_recv interrupt related state */
@@ -170,21 +176,28 @@ struct intel_guc {
struct mutex send_mutex;
};
struct intel_huc {
/* Generic uC firmware management */
struct intel_uc_fw fw;
/* HuC-specific additions */
};
/* intel_uc.c */
void intel_uc_init_early(struct drm_i915_private *dev_priv);
int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len);
int intel_guc_sample_forcewake(struct intel_guc *guc);
int intel_guc_log_flush_complete(struct intel_guc *guc);
int intel_guc_log_flush(struct intel_guc *guc);
int intel_guc_log_control(struct intel_guc *guc, u32 control_val);
/* intel_guc_loader.c */
extern void intel_guc_init(struct drm_i915_private *dev_priv);
extern int intel_guc_setup(struct drm_i915_private *dev_priv);
extern void intel_guc_fini(struct drm_i915_private *dev_priv);
extern const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status);
extern int intel_guc_suspend(struct drm_i915_private *dev_priv);
extern int intel_guc_resume(struct drm_i915_private *dev_priv);
void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
struct intel_uc_fw *uc_fw);
u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
/* i915_guc_submission.c */
int i915_guc_submission_init(struct drm_i915_private *dev_priv);
@@ -193,10 +206,12 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *rq);
void i915_guc_wq_unreserve(struct drm_i915_gem_request *request);
void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
void i915_guc_capture_logs(struct drm_i915_private *dev_priv);
void i915_guc_flush_logs(struct drm_i915_private *dev_priv);
void i915_guc_register(struct drm_i915_private *dev_priv);
void i915_guc_unregister(struct drm_i915_private *dev_priv);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
/* intel_guc_log.c */
void intel_guc_log_create(struct intel_guc *guc);
void i915_guc_log_register(struct drm_i915_private *dev_priv);
void i915_guc_log_unregister(struct drm_i915_private *dev_priv);
int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
static inline u32 guc_ggtt_offset(struct i915_vma *vma)
@@ -207,4 +222,10 @@ static inline u32 guc_ggtt_offset(struct i915_vma *vma)
return offset;
}
/* intel_huc.c */
void intel_huc_init(struct drm_i915_private *dev_priv);
void intel_huc_fini(struct drm_i915_private *dev_priv);
int intel_huc_load(struct drm_i915_private *dev_priv);
void intel_guc_auth_huc(struct drm_i915_private *dev_priv);
#endif