Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux

Pull drm updates from Dave Airlie:
 "This is the main drm pull request for 4.5.  I don't think I've missed
  anything too major, I'm mostly back at work now but I'll probably get
  some sleep in 5 years time.

  Summary:

  New drivers:
   - etnaviv:

     GPU driver for the 3D core on Vivante GPUs used in numerous
     ARM boards.

  Highlights:

  Core:
   - Atomic suspend/resume helpers
   - Move the headers to using userspace-friendlier types.
   - Documentation updates
   - Lots of struct_mutex removal.
   - Bunch of DP MST fixes from AMD.

  Panel:
   - More DSI helpers
   - Support for some new basic panels

  i915:
   - Basic Kabylake support
   - DP link training and detect code refactoring
   - fbc/psr fixes
   - FIFO underrun fixes
   - SDE interrupt handling fixes
   - dma-buf/fence support in pageflip path.
   - GPU side for MST audio support

  radeon/amdgpu:
   - Drop UMS support
   - GPUVM/Scheduler optimisations
   - Initial Powerplay support for Tonga/Fiji/CZ/ST
   - ACP audio prerequisites

  nouveau:
   - GK20a instmem improvements
   - PCIE link speed change support

  msm:
   - DSI support for msm8960/apq8064

  tegra:
   - Host1X support for Tegra210 SoC

  vc4:
   - 3D acceleration support

  armada:
   - Get rid of struct_mutex

  tda998x:
   - Atomic modesetting support
   - TMDS clock limitations

  omapdrm:
   - Atomic modesetting support
   - Improved TILER performance

  rockchip:
   - RK3036 VOP support
   - Atomic modesetting support
   - Synopsys DW MIPI DSI support

  exynos:
   - Runtime PM support
   - of_graph binding for DP panels
   - Cleanup of IPP code
   - Configurable plane support
   - Kernel panic fixes at release time"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (711 commits)
  drm/fb_cma_helper: Remove implicit call to disable_unused_functions
  drm/amdgpu: add missing irq.h include
  drm/vmwgfx: Fix a width / pitch mismatch on framebuffer updates
  drm/vmwgfx: Fix an incorrect lock check
  drm: nouveau: fix nouveau_debugfs_init prototype
  drm/nouveau/pci: fix check in nvkm_pcie_set_link
  drm/amdgpu: validate duplicates first
  drm/amdgpu: move VM page tables to the LRU end on CS v2
  drm/ttm: add ttm_bo_move_to_lru_tail function v2
  drm/ttm: fix adding foreign BOs to the swap LRU
  drm/ttm: fix adding foreign BOs to the LRU during init v2
  drm/radeon: use kobj_to_dev()
  drm/amdgpu: use kobj_to_dev()
  drm/amdgpu/cz: force vce clocks when sclks are forced
  drm/amdgpu/cz: force uvd clocks when sclks are forced
  drm/amdgpu/cz: add code to enable forcing VCE clocks
  drm/amdgpu/cz: add code to enable forcing UVD clocks
  drm/amdgpu: fix lost sync_to if scheduler is enabled.
  drm/amd/powerplay: fix static checker warning for return meaningless value.
  drm/sysfs: use kobj_to_dev()
  ...
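
Several entries in the shortlog (drm/radeon, drm/amdgpu, drm/sysfs) are
the same one-line cleanup: an open-coded container_of() replaced with
the kobj_to_dev() helper from <linux/device.h>. Roughly — the
foo_attr_is_visible() wrapper is hypothetical, just to give the
conversion some context:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static umode_t foo_attr_is_visible(struct kobject *kobj,
                                       struct bin_attribute *attr, int idx)
    {
            /* was: struct device *dev =
             *              container_of(kobj, struct device, kobj);
             */
            struct device *dev = kobj_to_dev(kobj);

            return device_is_registered(dev) ? attr->attr.mode : 0;
    }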
Author: Linus Torvalds
Date:   2016-01-17 13:40:25 -08:00

711 changed files with 83802 additions and 27206 deletions

drivers/gpu/drm/omapdrm/omap_gem.c

@@ -26,24 +26,15 @@
#include "omap_drv.h"
#include "omap_dmm_tiler.h"
-/* remove these once drm core helpers are merged */
-struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
-void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
-bool dirty, bool accessed);
-int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
/*
* GEM buffer object implementation.
*/
-#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_DMA 0x01000000 /* actually is physically contiguous */
#define OMAP_BO_EXT_SYNC 0x02000000 /* externally allocated sync object */
#define OMAP_BO_EXT_MEM 0x04000000 /* externally allocated memory */
struct omap_gem_object {
struct drm_gem_object base;
@@ -120,8 +111,7 @@ struct omap_gem_object {
} *sync;
};
-static int get_pages(struct drm_gem_object *obj, struct page ***pages);
-static uint64_t mmap_offset(struct drm_gem_object *obj);
+#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
* not necessarily pinned in TILER all the time, and (b) when they are
@@ -135,27 +125,69 @@ static uint64_t mmap_offset(struct drm_gem_object *obj);
* for later..
*/
#define NUM_USERGART_ENTRIES 2
-struct usergart_entry {
+struct omap_drm_usergart_entry {
struct tiler_block *block; /* the reserved tiler block */
dma_addr_t paddr;
struct drm_gem_object *obj; /* the current pinned obj */
pgoff_t obj_pgoff; /* page offset of obj currently
mapped in */
};
-static struct {
-struct usergart_entry entry[NUM_USERGART_ENTRIES];
+struct omap_drm_usergart {
+struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
int height; /* height in rows */
int height_shift; /* ilog2(height in rows) */
int slot_shift; /* ilog2(width per slot) */
int stride_pfn; /* stride in pages */
int last; /* index of last used entry */
-} *usergart;
+};
+/* -----------------------------------------------------------------------------
+ * Helpers
+ */
+/** get mmap offset */
+static uint64_t mmap_offset(struct drm_gem_object *obj)
+{
+struct drm_device *dev = obj->dev;
+int ret;
+size_t size;
+WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+/* Make it mmapable */
+size = omap_gem_mmap_size(obj);
+ret = drm_gem_create_mmap_offset_size(obj, size);
+if (ret) {
+dev_err(dev->dev, "could not allocate mmap offset\n");
+return 0;
+}
+return drm_vma_node_offset_addr(&obj->vma_node);
+}
+/* GEM objects can either be allocated from contiguous memory (in which
+ * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL). But non
+ * contiguous buffers can be remapped in TILER/DMM if they need to be
+ * contiguous... but we don't do this all the time to reduce pressure
+ * on TILER/DMM space when we know at allocation time that the buffer
+ * will need to be scanned out.
+ */
+static inline bool is_shmem(struct drm_gem_object *obj)
+{
+return obj->filp != NULL;
+}
+/* -----------------------------------------------------------------------------
+ * Eviction
+ */
static void evict_entry(struct drm_gem_object *obj,
-enum tiler_fmt fmt, struct usergart_entry *entry)
+enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
-int n = usergart[fmt].height;
+struct omap_drm_private *priv = obj->dev->dev_private;
+int n = priv->usergart[fmt].height;
size_t size = PAGE_SIZE * n;
loff_t off = mmap_offset(obj) +
(entry->obj_pgoff << PAGE_SHIFT);
@@ -181,46 +213,25 @@ static void evict_entry(struct drm_gem_object *obj,
static void evict(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
+struct omap_drm_private *priv = obj->dev->dev_private;
if (omap_obj->flags & OMAP_BO_TILED) {
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
int i;
-if (!usergart)
-return;
for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
struct usergart_entry *entry = &usergart[fmt].entry[i];
struct omap_drm_usergart_entry *entry =
&priv->usergart[fmt].entry[i];
if (entry->obj == obj)
evict_entry(obj, fmt, entry);
}
}
}
-/* GEM objects can either be allocated from contiguous memory (in which
- * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL). But non
- * contiguous buffers can be remapped in TILER/DMM if they need to be
- * contiguous... but we don't do this all the time to reduce pressure
- * on TILER/DMM space when we know at allocation time that the buffer
- * will need to be scanned out.
+/* -----------------------------------------------------------------------------
+ * Page Management
 */
-static inline bool is_shmem(struct drm_gem_object *obj)
-{
-return obj->filp != NULL;
-}
-/**
- * shmem buffers that are mapped cached can simulate coherency via using
- * page faulting to keep track of dirty pages
- */
-static inline bool is_cached_coherent(struct drm_gem_object *obj)
-{
-struct omap_gem_object *omap_obj = to_omap_bo(obj);
-return is_shmem(obj) &&
-((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
-}
-static DEFINE_SPINLOCK(sync_lock);
/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
@@ -273,6 +284,28 @@ free_pages:
return ret;
}
+/* acquire pages when needed (for example, for DMA where physically
+ * contiguous buffer is not required
+ */
+static int get_pages(struct drm_gem_object *obj, struct page ***pages)
+{
+struct omap_gem_object *omap_obj = to_omap_bo(obj);
+int ret = 0;
+if (is_shmem(obj) && !omap_obj->pages) {
+ret = omap_gem_attach_pages(obj);
+if (ret) {
+dev_err(obj->dev->dev, "could not attach pages\n");
+return ret;
+}
+}
+/* TODO: even phys-contig.. we should have a list of pages? */
+*pages = omap_obj->pages;
+return 0;
+}
/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
@@ -302,26 +335,6 @@ uint32_t omap_gem_flags(struct drm_gem_object *obj)
return to_omap_bo(obj)->flags;
}
-/** get mmap offset */
-static uint64_t mmap_offset(struct drm_gem_object *obj)
-{
-struct drm_device *dev = obj->dev;
-int ret;
-size_t size;
-WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-/* Make it mmapable */
-size = omap_gem_mmap_size(obj);
-ret = drm_gem_create_mmap_offset_size(obj, size);
-if (ret) {
-dev_err(dev->dev, "could not allocate mmap offset\n");
-return 0;
-}
-return drm_vma_node_offset_addr(&obj->vma_node);
-}
uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
uint64_t offset;
@@ -363,6 +376,10 @@ int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
return -EINVAL;
}
+/* -----------------------------------------------------------------------------
+ * Fault Handling
+ */
/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -395,7 +412,8 @@ static int fault_2d(struct drm_gem_object *obj,
struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
-struct usergart_entry *entry;
+struct omap_drm_private *priv = obj->dev->dev_private;
+struct omap_drm_usergart_entry *entry;
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
struct page *pages[64]; /* XXX is this too much to have on stack? */
unsigned long pfn;
@@ -408,8 +426,8 @@ static int fault_2d(struct drm_gem_object *obj,
* that need to be mapped in to fill 4kb wide CPU page. If the slot
* height is 64, then 64 pages fill a 4kb wide by 64 row region.
*/
-const int n = usergart[fmt].height;
-const int n_shift = usergart[fmt].height_shift;
+const int n = priv->usergart[fmt].height;
+const int n_shift = priv->usergart[fmt].height_shift;
/*
* If buffer width in bytes > PAGE_SIZE then the virtual stride is
@@ -430,11 +448,11 @@ static int fault_2d(struct drm_gem_object *obj,
base_pgoff = round_down(pgoff, m << n_shift);
/* figure out buffer width in slots */
-slots = omap_obj->width >> usergart[fmt].slot_shift;
+slots = omap_obj->width >> priv->usergart[fmt].slot_shift;
vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
entry = &usergart[fmt].entry[usergart[fmt].last];
entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];
/* evict previous buffer using this usergart entry, if any: */
if (entry->obj)
@@ -482,12 +500,13 @@ static int fault_2d(struct drm_gem_object *obj,
for (i = n; i > 0; i--) {
vm_insert_mixed(vma, (unsigned long)vaddr,
__pfn_to_pfn_t(pfn, PFN_DEV));
-pfn += usergart[fmt].stride_pfn;
+pfn += priv->usergart[fmt].stride_pfn;
vaddr += PAGE_SIZE * m;
}
/* simple round-robin: */
-usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;
+priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
+% NUM_USERGART_ENTRIES;
return 0;
}
@@ -599,6 +618,9 @@ int omap_gem_mmap_obj(struct drm_gem_object *obj,
return 0;
}
+/* -----------------------------------------------------------------------------
+ * Dumb Buffers
+ */
/**
* omap_gem_dumb_create - create a dumb buffer
@@ -656,6 +678,7 @@ fail:
return ret;
}
+#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position. This allows us to implement fast scrolling
* for console.
*
@@ -692,6 +715,22 @@ fail:
return ret;
}
+#endif
+/* -----------------------------------------------------------------------------
+ * Memory Management & DMA Sync
+ */
+/**
+ * shmem buffers that are mapped cached can simulate coherency via using
+ * page faulting to keep track of dirty pages
+ */
+static inline bool is_cached_coherent(struct drm_gem_object *obj)
+{
+struct omap_gem_object *omap_obj = to_omap_bo(obj);
+return is_shmem(obj) &&
+((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
+}
/* Sync the buffer for CPU access.. note pages should already be
* attached, ie. omap_gem_get_pages()
@@ -868,28 +907,6 @@ int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
return ret;
}
-/* acquire pages when needed (for example, for DMA where physically
- * contiguous buffer is not required
- */
-static int get_pages(struct drm_gem_object *obj, struct page ***pages)
-{
-struct omap_gem_object *omap_obj = to_omap_bo(obj);
-int ret = 0;
-if (is_shmem(obj) && !omap_obj->pages) {
-ret = omap_gem_attach_pages(obj);
-if (ret) {
-dev_err(obj->dev->dev, "could not attach pages\n");
-return ret;
-}
-}
-/* TODO: even phys-contig.. we should have a list of pages? */
-*pages = omap_obj->pages;
-return 0;
-}
/* if !remap, and we don't have pages backing, then fail, rather than
* increasing the pin count (which we don't really do yet anyways,
* because we don't support swapping pages back out). And 'remap'
@@ -927,6 +944,7 @@ int omap_gem_put_pages(struct drm_gem_object *obj)
return 0;
}
+#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Get kernel virtual address for CPU access.. this more or less only
* exists for omap_fbdev. This should be called with struct_mutex
* held.
@@ -945,6 +963,11 @@ void *omap_gem_vaddr(struct drm_gem_object *obj)
}
return omap_obj->vaddr;
}
+#endif
+/* -----------------------------------------------------------------------------
+ * Power Management
+ */
#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
@@ -974,6 +997,10 @@ int omap_gem_resume(struct device *dev)
}
#endif
+/* -----------------------------------------------------------------------------
+ * DebugFS
+ */
#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
@@ -1020,9 +1047,12 @@ void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
}
#endif
-/* Buffer Synchronization:
+/* -----------------------------------------------------------------------------
+ * Buffer Synchronization
 */
+static DEFINE_SPINLOCK(sync_lock);
struct omap_gem_sync_waiter {
struct list_head list;
struct omap_gem_object *omap_obj;
@@ -1268,6 +1298,10 @@ unlock:
return ret;
}
+/* -----------------------------------------------------------------------------
+ * Constructor & Destructor
+ */
/* don't call directly.. called from GEM core when it is time to actually
* free the object..
*/
@@ -1285,8 +1319,6 @@ void omap_gem_free_object(struct drm_gem_object *obj)
list_del(&omap_obj->mm_list);
spin_unlock(&priv->list_lock);
-drm_gem_free_mmap_offset(obj);
/* this means the object is still pinned.. which really should
* not happen. I think..
*/
@@ -1311,31 +1343,7 @@ void omap_gem_free_object(struct drm_gem_object *obj)
drm_gem_object_release(obj);
-kfree(obj);
-}
-/* convenience method to construct a GEM buffer object, and userspace handle */
-int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
-union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
-{
-struct drm_gem_object *obj;
-int ret;
-obj = omap_gem_new(dev, gsize, flags);
-if (!obj)
-return -ENOMEM;
-ret = drm_gem_handle_create(file, obj, handle);
-if (ret) {
-drm_gem_object_release(obj);
-kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */
-return ret;
-}
-/* drop reference from allocate - handle holds it now */
-drm_gem_object_unreference_unlocked(obj);
-return 0;
+kfree(omap_obj);
}
/* GEM buffer object constructor */
@@ -1344,15 +1352,15 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_gem_object *omap_obj;
-struct drm_gem_object *obj = NULL;
+struct drm_gem_object *obj;
struct address_space *mapping;
size_t size;
int ret;
if (flags & OMAP_BO_TILED) {
-if (!usergart) {
+if (!priv->usergart) {
dev_err(dev->dev, "Tiled buffers require DMM\n");
-goto fail;
+return NULL;
}
/* tiled buffers are always shmem paged backed.. when they are
@@ -1423,16 +1431,42 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
return obj;
fail:
-if (obj)
-omap_gem_free_object(obj);
+omap_gem_free_object(obj);
return NULL;
}
-/* init/cleanup.. if DMM is used, we need to set some stuff up.. */
+/* convenience method to construct a GEM buffer object, and userspace handle */
+int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
+{
+struct drm_gem_object *obj;
+int ret;
+obj = omap_gem_new(dev, gsize, flags);
+if (!obj)
+return -ENOMEM;
+ret = drm_gem_handle_create(file, obj, handle);
+if (ret) {
+omap_gem_free_object(obj);
+return ret;
+}
+/* drop reference from allocate - handle holds it now */
+drm_gem_object_unreference_unlocked(obj);
+return 0;
+}
+/* -----------------------------------------------------------------------------
+ * Init & Cleanup
+ */
+/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
+struct omap_drm_usergart *usergart;
const enum tiler_fmt fmts[] = {
TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
};
@@ -1461,10 +1495,11 @@ void omap_gem_init(struct drm_device *dev)
usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
struct usergart_entry *entry = &usergart[i].entry[j];
struct tiler_block *block =
tiler_reserve_2d(fmts[i], w, h,
PAGE_SIZE);
struct omap_drm_usergart_entry *entry;
struct tiler_block *block;
entry = &usergart[i].entry[j];
block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
if (IS_ERR(block)) {
dev_err(dev->dev,
"reserve failed: %d, %d, %ld\n",
@@ -1480,13 +1515,16 @@ void omap_gem_init(struct drm_device *dev)
}
}
+priv->usergart = usergart;
priv->has_dmm = true;
}
void omap_gem_deinit(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
/* I believe we can rely on there being no more outstanding GEM
* objects which could depend on usergart/dmm at this point.
*/
-kfree(usergart);
+kfree(priv->usergart);
}
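
The thread running through the omap_gem.c diff above: the usergart
bookkeeping moves from a file-scope static into struct omap_drm_private,
so every lookup goes through the per-device private data instead of a
global shared by all instances. A reduced, standalone model of the
before/after (types trimmed to essentials, inferred only from what the
diff shows):

    enum tiler_fmt { TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT };
    #define NUM_USERGART_ENTRIES 2

    struct omap_drm_usergart_entry {
            void *block;    /* stands in for struct tiler_block * */
            void *obj;      /* the currently pinned GEM object */
    };

    struct omap_drm_usergart {
            struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
            int height, height_shift, slot_shift, stride_pfn, last;
    };

    /* before: static struct omap_drm_usergart *usergart;
     *         ... usergart[fmt].height ...
     * after:  the pointer hangs off the per-device private data:
     */
    struct omap_drm_private {
            struct omap_drm_usergart *usergart;
            int has_dmm;
    };

    static int usergart_height(struct omap_drm_private *priv,
                               enum tiler_fmt fmt)
    {
            return priv->usergart[fmt].height;
    }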