Merge tag 'drm-intel-next-2012-12-21' of git://people.freedesktop.org/~danvet/drm-intel into drm-next
Daniel writes:
- seqno wrap fixes and debug infrastructure from Mika Kuoppala and Chris Wilson
- some leftover kill-agp on gen6+ patches from Ben
- hotplug improvements from Damien
- clear fb when allocated from stolen, avoids dirt on the fbcon (Chris)
- Stolen mem support from Chris Wilson, one of the many steps to get to real fastboot support.
- Some DDI code cleanups from Paulo.
- Some refactorings around lvds and dp code.
- some random little bits&pieces

* tag 'drm-intel-next-2012-12-21' of git://people.freedesktop.org/~danvet/drm-intel: (93 commits)
  drm/i915: Return the real error code from intel_set_mode()
  drm/i915: Make GSM void
  drm/i915: Move GSM mapping into dev_priv
  drm/i915: Move even more gtt code to i915_gem_gtt
  drm/i915: Make next_seqno debugs entry to use i915_gem_set_seqno
  drm/i915: Introduce i915_gem_set_seqno()
  drm/i915: Always clear semaphore mboxes on seqno wrap
  drm/i915: Initialize hardware semaphore state on ring init
  drm/i915: Introduce ring set_seqno
  drm/i915: Missed conversion to gtt_pte_t
  drm/i915: Bug on unsupported swizzled platforms
  drm/i915: BUG() if fences are used on unsupported platform
  drm/i915: fixup overlay stolen memory leak
  drm/i915: clean up PIPECONF bpc #defines
  drm/i915: add intel_dp_set_signal_levels
  drm/i915: remove leftover display.update_wm assignment
  drm/i915: check for the PCH when setting pch_transcoder
  drm/i915: Clear the stolen fb before enabling
  drm/i915: Access to snooped system memory through the GTT is incoherent
  drm/i915: Remove stale comment about intel_dp_detect()
  ...

Conflicts:
        drivers/gpu/drm/i915/intel_display.c
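Note on the seqno work merged below: i915_gem_init_seqno()/i915_gem_set_seqno() deliberately keep last_seqno one behind the injected value so that wrap detection keeps working. As a minimal sketch only (not the exact kernel code; the real helper is i915_seqno_passed() in intel_ringbuffer.h), wrap-safe ordering of 32-bit seqnos relies on a signed difference:

/* Sketch: wrap-safe comparison of 32-bit seqnos. Returns true when seq1 is
 * at or after seq2, even if the counter has wrapped past zero, because the
 * signed difference stays positive for "newer" values within a 2^31 window. */
static inline bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}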
@@ -163,8 +163,8 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
                return -ENODEV;
 
        mutex_lock(&dev->struct_mutex);
-       i915_gem_init_global_gtt(dev, args->gtt_start,
-                                args->gtt_end, args->gtt_end);
+       i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
+                                 args->gtt_end);
        mutex_unlock(&dev->struct_mutex);
 
        return 0;
@@ -192,6 +192,18 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
+void *i915_gem_object_alloc(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
+}
+
+void i915_gem_object_free(struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       kmem_cache_free(dev_priv->slab, obj);
+}
+
 static int
 i915_gem_create(struct drm_file *file,
                struct drm_device *dev,
@@ -215,7 +227,7 @@ i915_gem_create(struct drm_file *file,
        if (ret) {
                drm_gem_object_release(&obj->base);
                i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
-               kfree(obj);
+               i915_gem_object_free(obj);
                return ret;
        }
 
@@ -259,14 +271,6 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
                               args->size, &args->handle);
 }
 
-static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
-{
-       drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
-
-       return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
-               obj->tiling_mode != I915_TILING_NONE;
-}
-
 static inline int
 __copy_to_user_swizzled(char __user *cpu_vaddr,
                        const char *gpu_vaddr, int gpu_offset,
@@ -407,7 +411,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
        loff_t offset;
        int shmem_page_offset, page_length, ret = 0;
        int obj_do_bit17_swizzling, page_do_bit17_swizzling;
-       int hit_slowpath = 0;
        int prefaulted = 0;
        int needs_clflush = 0;
        struct scatterlist *sg;
@@ -469,7 +472,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
                if (ret == 0)
                        goto next_page;
 
-               hit_slowpath = 1;
                mutex_unlock(&dev->struct_mutex);
 
                if (!prefaulted) {
@@ -502,12 +504,6 @@ next_page:
 out:
        i915_gem_object_unpin_pages(obj);
 
-       if (hit_slowpath) {
-               /* Fixup: Kill any reinstated backing storage pages */
-               if (obj->madv == __I915_MADV_PURGED)
-                       i915_gem_object_truncate(obj);
-       }
-
        return ret;
 }
 
@@ -838,12 +834,13 @@ out:
        i915_gem_object_unpin_pages(obj);
 
        if (hit_slowpath) {
-               /* Fixup: Kill any reinstated backing storage pages */
-               if (obj->madv == __I915_MADV_PURGED)
-                       i915_gem_object_truncate(obj);
-               /* and flush dirty cachelines in case the object isn't in the cpu write
-                * domain anymore. */
-               if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+               /*
+                * Fixup: Flush cpu caches in case we didn't flush the dirty
+                * cachelines in-line while writing and the object moved
+                * out of the cpu write domain while we've dropped the lock.
+                */
+               if (!needs_clflush_after &&
+                   obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
                        i915_gem_clflush_object(obj);
                        i915_gem_chipset_flush(dev);
                }
@@ -1344,6 +1341,12 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        trace_i915_gem_object_fault(obj, page_offset, true, write);
 
+       /* Access to snoopable pages through the GTT is incoherent. */
+       if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
+               ret = -EINVAL;
+               goto unlock;
+       }
+
        /* Now bind it into the GTT if needed */
        ret = i915_gem_object_pin(obj, 0, true, false);
        if (ret)
@@ -1933,30 +1936,24 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 }
 
 static int
-i915_gem_handle_seqno_wrap(struct drm_device *dev)
+i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
        int ret, i, j;
 
-       /* The hardware uses various monotonic 32-bit counters, if we
-        * detect that they will wraparound we need to idle the GPU
-        * and reset those counters.
-        */
-       ret = 0;
+       /* Carefully retire all requests without writing to the rings */
        for_each_ring(ring, dev_priv, i) {
-               for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
-                       ret |= ring->sync_seqno[j] != 0;
+               ret = intel_ring_idle(ring);
+               if (ret)
+                       return ret;
        }
-       if (ret == 0)
-               return ret;
-
-       ret = i915_gpu_idle(dev);
-       if (ret)
-               return ret;
-
        i915_gem_retire_requests(dev);
+
+       /* Finally reset hw state */
        for_each_ring(ring, dev_priv, i) {
+               intel_ring_init_seqno(ring, seqno);
+
                for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
                        ring->sync_seqno[j] = 0;
        }
@@ -1964,6 +1961,32 @@ i915_gem_handle_seqno_wrap(struct drm_device *dev)
        return 0;
 }
 
+int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+
+       if (seqno == 0)
+               return -EINVAL;
+
+       /* HWS page needs to be set less than what we
+        * will inject to ring
+        */
+       ret = i915_gem_init_seqno(dev, seqno - 1);
+       if (ret)
+               return ret;
+
+       /* Carefully set the last_seqno value so that wrap
+        * detection still works
+        */
+       dev_priv->next_seqno = seqno;
+       dev_priv->last_seqno = seqno - 1;
+       if (dev_priv->last_seqno == 0)
+               dev_priv->last_seqno--;
+
+       return 0;
+}
+
 int
 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
 {
@@ -1971,14 +1994,14 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
 
        /* reserve 0 for non-seqno */
        if (dev_priv->next_seqno == 0) {
-               int ret = i915_gem_handle_seqno_wrap(dev);
+               int ret = i915_gem_init_seqno(dev, 0);
                if (ret)
                        return ret;
 
                dev_priv->next_seqno = 1;
        }
 
-       *seqno = dev_priv->next_seqno++;
+       *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
        return 0;
 }
 
@@ -2648,7 +2671,7 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
        case 4: i965_write_fence_reg(dev, reg, obj); break;
        case 3: i915_write_fence_reg(dev, reg, obj); break;
        case 2: i830_write_fence_reg(dev, reg, obj); break;
-       default: break;
+       default: BUG();
        }
 }
 
@@ -2823,7 +2846,7 @@ static bool i915_gem_valid_gtt_space(struct drm_device *dev,
 
        /* On non-LLC machines we have to be careful when putting differing
         * types of snoopable memory together to avoid the prefetcher
-        * crossing memory domains and dieing.
+        * crossing memory domains and dying.
         */
        if (HAS_LLC(dev))
                return true;
@@ -3698,14 +3721,14 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 {
        struct drm_i915_gem_object *obj;
        struct address_space *mapping;
-       u32 mask;
+       gfp_t mask;
 
-       obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+       obj = i915_gem_object_alloc(dev);
        if (obj == NULL)
                return NULL;
 
        if (drm_gem_object_init(dev, &obj->base, size) != 0) {
-               kfree(obj);
+               i915_gem_object_free(obj);
                return NULL;
        }
 
@@ -3777,6 +3800,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
        obj->pages_pin_count = 0;
        i915_gem_object_put_pages(obj);
        i915_gem_object_free_mmap_offset(obj);
+       i915_gem_object_release_stolen(obj);
 
        BUG_ON(obj->pages);
 
@@ -3787,7 +3811,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
        i915_gem_info_remove_obj(dev_priv, obj->base.size);
 
        kfree(obj->bit_17);
-       kfree(obj);
+       i915_gem_object_free(obj);
 }
 
 int
@@ -3883,8 +3907,10 @@ void i915_gem_init_swizzling(struct drm_device *dev)
        I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
        if (IS_GEN6(dev))
                I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
-       else
+       else if (IS_GEN7(dev))
                I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
+       else
+               BUG();
 }
 
 static bool
@@ -3919,6 +3945,8 @@ i915_gem_init_hw(struct drm_device *dev)
 
        i915_gem_init_swizzling(dev);
 
+       dev_priv->next_seqno = dev_priv->last_seqno = (u32)~0 - 0x1000;
+
        ret = intel_init_render_ring_buffer(dev);
        if (ret)
                return ret;
@@ -3935,8 +3963,6 @@ i915_gem_init_hw(struct drm_device *dev)
                goto cleanup_bsd_ring;
        }
 
-       dev_priv->next_seqno = 1;
-
        /*
         * XXX: There was some w/a described somewhere suggesting loading
         * contexts before PPGTT.
@@ -3953,58 +3979,13 @@ cleanup_render_ring:
        return ret;
 }
 
-static bool
-intel_enable_ppgtt(struct drm_device *dev)
-{
-       if (i915_enable_ppgtt >= 0)
-               return i915_enable_ppgtt;
-
-#ifdef CONFIG_INTEL_IOMMU
-       /* Disable ppgtt on SNB if VT-d is on. */
-       if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
-               return false;
-#endif
-
-       return true;
-}
-
 int i915_gem_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long gtt_size, mappable_size;
        int ret;
 
-       gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
-       mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-
        mutex_lock(&dev->struct_mutex);
-       if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
-               /* PPGTT pdes are stolen from global gtt ptes, so shrink the
-                * aperture accordingly when using aliasing ppgtt. */
-               gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
-
-               i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
-
-               ret = i915_gem_init_aliasing_ppgtt(dev);
-               if (ret) {
-                       mutex_unlock(&dev->struct_mutex);
-                       return ret;
-               }
-       } else {
-               /* Let GEM Manage all of the aperture.
-                *
-                * However, leave one page at the end still bound to the scratch
-                * page. There are a number of places where the hardware
-                * apparently prefetches past the end of the object, and we've
-                * seen multiple hangs with the GPU head pointer stuck in a
-                * batchbuffer bound at the last page of the aperture. One page
-                * should be enough to keep any prefetching inside of the
-                * aperture.
-                */
-               i915_gem_init_global_gtt(dev, 0, mappable_size,
-                                        gtt_size);
-       }
-
+       i915_gem_init_global_gtt(dev);
        ret = i915_gem_init_hw(dev);
        mutex_unlock(&dev->struct_mutex);
        if (ret) {
@@ -4105,8 +4086,14 @@ init_ring_lists(struct intel_ring_buffer *ring)
 void
 i915_gem_load(struct drm_device *dev)
 {
-       int i;
        drm_i915_private_t *dev_priv = dev->dev_private;
+       int i;
+
+       dev_priv->slab =
+               kmem_cache_create("i915_gem_object",
+                                 sizeof(struct drm_i915_gem_object), 0,
+                                 SLAB_HWCACHE_ALIGN,
+                                 NULL);
 
        INIT_LIST_HEAD(&dev_priv->mm.active_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);