Merge branch 'drm-intel-next' of git://people.freedesktop.org/~danvet/drm-intel into drm-core-next

Daniel Vetter wrote:
First pull request for 3.5-next, slightly larger than usual because new
things kept coming in since the last pull for 3.4.
Highlights:
- first batch of hw enablement for vlv (Jesse et al) and hsw (Eugeni). pci
 ids are not yet added, and there are still quite a few patches to merge
 (mostly modesetting). To make QA easier I've decided to merge this stuff
 in pieces.
- loads of cleanups and prep patches spurred by the above. vlv especially
 is a real frankenstein chip, and hsw too is stretching our driver's
 code design. Expect more to come in this area for 3.5.
- more gmbus fixes, cleanups and improvements by Daniel Kurtz. Again,
 there are more patches needed (and some already queued up), but I wanted
 to split this a bit for better testing.
- pwrite/pread rework and retuning. This series has been in the works for
 a few months already and a lot of i-g-t tests have been created for it.
 Now it's finally ready to be merged. Note that one patch in this series
 touches include/linux/pagemap.h; that patch is acked-by akpm.
- reduced mappable pressure and relocation throughput improvements from
 Chris.
- mmap offset exhaustion mitigation by Chris Wilson.
- a start at figuring out which codepaths in our messy dri1/ums+gem/kms
 driver we actually need to support, by bailing out of unsupported cases.
 The driver now refuses to load without kms on gen6+ and disallows a few
 ioctls that userspace never used in certain cases. More of this will
 definitely come.
- More decoupling of global gtt and ppgtt.
- Improved dual-link lvds detection by Takashi Iwai.
- Shut up the compiler + fix the fallout (Ben).
- Inverted panel brightness handling (it's mostly Acer that manages to
 break things this way).
- Small fixlets and adjustments and some minor things to help debugging.

Regression-wise QA reported quite a few issues on ivb, but all of them
turned out to be hw stability issues which are already fixed in
drm-intel-fixes (QA runs the nightly regression tests on -next alone,
without -fixes automatically merged in). There's still one issue open on
snb: it looks like occlusion query writes are not quite as cache coherent
as we'd expected. With some of the pwrite adjustments we can now
reliably hit this. A kernel workaround for it is in the works.

* 'drm-intel-next' of git://people.freedesktop.org/~danvet/drm-intel: (101 commits)
  drm/i915: VCS is not the last ring
  drm/i915: Add a dual link lvds quirk for MacBook Pro 8,2
  drm/i915: make quirks more verbose
  drm/i915: dump the DMA fetch addr register on pre-gen6
  drm/i915/sdvo: Include YRPB as an additional TV output type
  drm/i915: disallow gem init ioctl on ilk
  drm/i915: refuse to load on gen6+ without kms
  drm/i915: extract gt interrupt handler
  drm/i915: use render gen to switch ring irq functions
  drm/i915: rip out old HWSTAM missed irq WA for vlv
  drm/i915: open code gen6+ ring irqs
  drm/i915: ring irq cleanups
  drm/i915: add SFUSE_STRAP registers for digital port detection
  drm/i915: add WM_LINETIME registers
  drm/i915: add WRPLL clocks
  drm/i915: add LCPLL control registers
  drm/i915: add SSC offsets for SBI access
  drm/i915: add port clock selection support for HSW
  drm/i915: add S PLL control
  drm/i915: add PIXCLK_GATE register
  ...

Conflicts:
	drivers/char/agp/intel-agp.h
	drivers/char/agp/intel-gtt.c
	drivers/gpu/drm/i915/i915_debugfs.c
Dave Airlie, 2012-04-12 10:27:01 +01:00
33 changed files with 2584 additions and 1134 deletions

drivers/gpu/drm/i915/intel_ringbuffer.c

@@ -290,9 +290,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 			| RING_VALID);
 	/* If the head is still not zero, the ring is dead */
-	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
-	    I915_READ_START(ring) != obj->gtt_offset ||
-	    (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
+	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
+		     I915_READ_START(ring) == obj->gtt_offset &&
+		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
 		DRM_ERROR("%s initialization failed "
 				"ctl %08x head %08x tail %08x start %08x\n",
 				ring->name,
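
The change above swaps an immediate sanity check of the ring registers for
a polled check via the driver's wait_for(..., 50) helper, giving the
hardware up to 50 ms to settle into a valid state before the ring is
declared dead. The following is a simplified sketch of a poll-with-timeout
helper of that general shape, for illustration only; the driver's actual
macro lives in intel_drv.h and differs in details such as its sleep
behaviour.

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Illustrative only: poll COND until it is true or MS milliseconds pass. */
#define sketch_wait_for(COND, MS) ({                                    \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);       \
	int ret__ = 0;                                                  \
	while (!(COND)) {                                               \
		if (time_after(jiffies, timeout__)) {                   \
			ret__ = -ETIMEDOUT;                             \
			break;                                          \
		}                                                       \
		msleep(1); /* back off briefly between reads */         \
	}                                                               \
	ret__;                                                          \
})
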
@@ -687,7 +687,7 @@ render_ring_get_irq(struct intel_ring_buffer *ring)
 	spin_lock(&ring->irq_lock);
 	if (ring->irq_refcount++ == 0) {
-		if (HAS_PCH_SPLIT(dev))
+		if (INTEL_INFO(dev)->gen >= 5)
 			ironlake_enable_irq(dev_priv,
 					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
 		else
@@ -706,7 +706,7 @@ render_ring_put_irq(struct intel_ring_buffer *ring)
 	spin_lock(&ring->irq_lock);
 	if (--ring->irq_refcount == 0) {
-		if (HAS_PCH_SPLIT(dev))
+		if (INTEL_INFO(dev)->gen >= 5)
 			ironlake_disable_irq(dev_priv,
 					     GT_USER_INTERRUPT |
 					     GT_PIPE_NOTIFY);
@@ -788,10 +788,11 @@ ring_add_request(struct intel_ring_buffer *ring,
 }
 
 static bool
-gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+gen6_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 mask = ring->irq_enable;
 
 	if (!dev->irq_enabled)
 		return false;
@@ -803,9 +804,9 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 	spin_lock(&ring->irq_lock);
 	if (ring->irq_refcount++ == 0) {
-		ring->irq_mask &= ~rflag;
+		ring->irq_mask &= ~mask;
 		I915_WRITE_IMR(ring, ring->irq_mask);
-		ironlake_enable_irq(dev_priv, gflag);
+		ironlake_enable_irq(dev_priv, mask);
 	}
 	spin_unlock(&ring->irq_lock);
@@ -813,16 +814,17 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 }
 
 static void
-gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+gen6_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 mask = ring->irq_enable;
 
 	spin_lock(&ring->irq_lock);
 	if (--ring->irq_refcount == 0) {
-		ring->irq_mask |= rflag;
+		ring->irq_mask |= mask;
 		I915_WRITE_IMR(ring, ring->irq_mask);
-		ironlake_disable_irq(dev_priv, gflag);
+		ironlake_disable_irq(dev_priv, mask);
 	}
 	spin_unlock(&ring->irq_lock);
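
For readability, here is the consolidated gen6_ring_get_irq() stitched
together from the two hunks above. Any context lines elided by the diff
view are omitted, so treat this as a sketch of the resulting function
rather than its complete body; the trailing return is implied by the bool
signature.

static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 mask = ring->irq_enable;	/* per-ring mask, new in this patch */

	if (!dev->irq_enabled)
		return false;

	/* ... any context elided by the diff view ... */

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		/* enable this ring's user interrupt */
		ring->irq_mask &= ~mask;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_enable_irq(dev_priv, mask);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}
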
@@ -1361,38 +1363,6 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 	return 0;
 }
 
-static bool
-gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
-{
-	return gen6_ring_get_irq(ring,
-				 GT_USER_INTERRUPT,
-				 GEN6_RENDER_USER_INTERRUPT);
-}
-
-static void
-gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
-{
-	return gen6_ring_put_irq(ring,
-				 GT_USER_INTERRUPT,
-				 GEN6_RENDER_USER_INTERRUPT);
-}
-
-static bool
-gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
-{
-	return gen6_ring_get_irq(ring,
-				 GT_GEN6_BSD_USER_INTERRUPT,
-				 GEN6_BSD_USER_INTERRUPT);
-}
-
-static void
-gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
-	return gen6_ring_put_irq(ring,
-				 GT_GEN6_BSD_USER_INTERRUPT,
-				 GEN6_BSD_USER_INTERRUPT);
-}
-
 /* ring buffer for Video Codec for Gen6+ */
 static const struct intel_ring_buffer gen6_bsd_ring = {
 	.name			= "gen6 bsd ring",
@@ -1404,8 +1374,9 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
 	.flush			= gen6_ring_flush,
 	.add_request		= gen6_add_request,
 	.get_seqno		= gen6_ring_get_seqno,
-	.irq_get		= gen6_bsd_ring_get_irq,
-	.irq_put		= gen6_bsd_ring_put_irq,
+	.irq_enable		= GEN6_BSD_USER_INTERRUPT,
+	.irq_get		= gen6_ring_get_irq,
+	.irq_put		= gen6_ring_put_irq,
 	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
 	.sync_to		= gen6_bsd_ring_sync_to,
 	.semaphore_register	= {MI_SEMAPHORE_SYNC_VR,
@@ -1416,22 +1387,6 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
 /* Blitter support (SandyBridge+) */
 
-static bool
-blt_ring_get_irq(struct intel_ring_buffer *ring)
-{
-	return gen6_ring_get_irq(ring,
-				 GT_BLT_USER_INTERRUPT,
-				 GEN6_BLITTER_USER_INTERRUPT);
-}
-
-static void
-blt_ring_put_irq(struct intel_ring_buffer *ring)
-{
-	gen6_ring_put_irq(ring,
-			  GT_BLT_USER_INTERRUPT,
-			  GEN6_BLITTER_USER_INTERRUPT);
-}
-
 static int blt_ring_flush(struct intel_ring_buffer *ring,
 			  u32 invalidate, u32 flush)
 {
@@ -1463,8 +1418,9 @@ static const struct intel_ring_buffer gen6_blt_ring = {
 	.flush			= blt_ring_flush,
 	.add_request		= gen6_add_request,
 	.get_seqno		= gen6_ring_get_seqno,
-	.irq_get		= blt_ring_get_irq,
-	.irq_put		= blt_ring_put_irq,
+	.irq_get		= gen6_ring_get_irq,
+	.irq_put		= gen6_ring_put_irq,
+	.irq_enable		= GEN6_BLITTER_USER_INTERRUPT,
 	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
 	.sync_to		= gen6_blt_ring_sync_to,
 	.semaphore_register	= {MI_SEMAPHORE_SYNC_BR,
@@ -1482,8 +1438,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->add_request = gen6_add_request;
 		ring->flush = gen6_render_ring_flush;
-		ring->irq_get = gen6_render_ring_get_irq;
-		ring->irq_put = gen6_render_ring_put_irq;
+		ring->irq_get = gen6_ring_get_irq;
+		ring->irq_put = gen6_ring_put_irq;
+		ring->irq_enable = GT_USER_INTERRUPT;
 		ring->get_seqno = gen6_ring_get_seqno;
 	} else if (IS_GEN5(dev)) {
 		ring->add_request = pc_render_add_request;
@@ -1506,8 +1463,9 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	*ring = render_ring;
 	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->add_request = gen6_add_request;
-		ring->irq_get = gen6_render_ring_get_irq;
-		ring->irq_put = gen6_render_ring_put_irq;
+		ring->irq_get = gen6_ring_get_irq;
+		ring->irq_put = gen6_ring_put_irq;
+		ring->irq_enable = GT_USER_INTERRUPT;
 	} else if (IS_GEN5(dev)) {
 		ring->add_request = pc_render_add_request;
 		ring->get_seqno = pc_render_get_seqno;
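
Taken together, the remaining hunks delete the six per-ring irq wrapper
functions and instead key the two shared helpers off the new .irq_enable
mask that each ring definition (and the render ring init paths above) now
supplies. For symmetry with the earlier sketch, the put-side helper
assembled from the @@ -813,16 +814,17 @@ hunk reads roughly as follows,
again with any context elided by the diff view omitted.

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 mask = ring->irq_enable;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		/* mask this ring's user interrupt again */
		ring->irq_mask |= mask;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_disable_irq(dev_priv, mask);
	}
	spin_unlock(&ring->irq_lock);

	/* ... any context elided by the diff view ... */
}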