Merge drm/drm-next into drm-intel-next-queued

We need the rename of reservation_object to dma_resv.
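
The rename is mechanical: struct reservation_object becomes struct
dma_resv, <linux/reservation.h> becomes <linux/dma-resv.h>, and the
reservation_object_*() helpers become dma_resv_*(). As a rough sketch of
the before/after shape, using the same trylock-and-prune idiom the fixup
below touches (a hypothetical standalone helper, not code from this
merge):

    #include <linux/dma-resv.h>    /* was <linux/reservation.h> */

    /* was: struct reservation_object */
    static void prune_fences(struct dma_resv *resv)
    {
            /* was reservation_object_trylock() */
            if (dma_resv_trylock(resv)) {
                    /*
                     * was reservation_object_add_excl_fence();
                     * installing a NULL exclusive fence also drops all
                     * shared fences, pruning the signaled fence list.
                     */
                    dma_resv_add_excl_fence(resv, NULL);
                    /* was reservation_object_unlock() */
                    dma_resv_unlock(resv);
            }
    }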

The resolution used in this merge came from linux-next:
From: Stephen Rothwell <sfr@canb.auug.org.au>
Date: Wed, 14 Aug 2019 12:48:39 +1000
Subject: [PATCH] drm: fix up fallout from "dma-buf: rename reservation_object to dma_resv"

Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
---
 drivers/gpu/drm/i915/gt/intel_engine_pool.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
index 03d90b49584a..4cd54c569911 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
@@ -43,12 +43,12 @@ static int pool_active(struct i915_active *ref)
 {
        struct intel_engine_pool_node *node =
                container_of(ref, typeof(*node), active);
-       struct reservation_object *resv = node->obj->base.resv;
+       struct dma_resv *resv = node->obj->base.resv;
        int err;

-       if (reservation_object_trylock(resv)) {
-               reservation_object_add_excl_fence(resv, NULL);
-               reservation_object_unlock(resv);
+       if (dma_resv_trylock(resv)) {
+               dma_resv_add_excl_fence(resv, NULL);
+               dma_resv_unlock(resv);
        }

        err = i915_gem_object_pin_pages(node->obj);

which is a simplified version of an earlier fix that had:
Reviewed-by: Christian König <christian.koenig@amd.com>

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c

@@ -82,7 +82,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 {
         struct drm_i915_gem_busy *args = data;
         struct drm_i915_gem_object *obj;
-        struct reservation_object_list *list;
+        struct dma_resv_list *list;
         unsigned int seq;
         int err;
@@ -105,7 +105,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
          * Alternatively, we can trade that extra information on read/write
          * activity with
          *      args->busy =
-         *              !reservation_object_test_signaled_rcu(obj->resv, true);
+         *              !dma_resv_test_signaled_rcu(obj->resv, true);
          * to report the overall busyness. This is what the wait-ioctl does.
          *
          */

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c

@@ -147,7 +147,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
                                              true, I915_FENCE_TIMEOUT,
                                              I915_FENCE_GFP);
-                reservation_object_add_excl_fence(obj->base.resv,
-                                                  &clflush->dma);
+                dma_resv_add_excl_fence(obj->base.resv,
+                                        &clflush->dma);
                 i915_sw_fence_commit(&clflush->wait);

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c

@@ -300,7 +300,7 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
         if (err < 0) {
                 dma_fence_set_error(&work->dma, err);
         } else {
-                reservation_object_add_excl_fence(obj->base.resv, &work->dma);
+                dma_resv_add_excl_fence(obj->base.resv, &work->dma);
                 err = 0;
         }
         i915_gem_object_unlock(obj);

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c

@@ -6,7 +6,7 @@

 #include <linux/dma-buf.h>
 #include <linux/highmem.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>

 #include "i915_drv.h"
 #include "i915_gem_object.h"
@@ -204,8 +204,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
         .end_cpu_access = i915_gem_end_cpu_access,
 };

-struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
-                                      struct drm_gem_object *gem_obj, int flags)
+struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
 {
         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
         DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
@@ -222,7 +221,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
                 return ERR_PTR(ret);
         }

-        return drm_gem_dmabuf_export(dev, &exp_info);
+        return drm_gem_dmabuf_export(gem_obj->dev, &exp_info);
 }

 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c

@@ -5,7 +5,7 @@
  */

 #include <linux/intel-iommu.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/sync_file.h>
 #include <linux/uaccess.h>
@@ -1266,7 +1266,7 @@ relocate_entry(struct i915_vma *vma,

         if (!eb->reloc_cache.vaddr &&
             (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
-             !reservation_object_test_signaled_rcu(vma->resv, true))) {
+             !dma_resv_test_signaled_rcu(vma->resv, true))) {
                 const unsigned int gen = eb->reloc_cache.gen;
                 unsigned int len;
                 u32 *batch;

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_fence.c b/drivers/gpu/drm/i915/gem/i915_gem_fence.c

@@ -77,7 +77,7 @@ i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
                            I915_FENCE_GFP) < 0)
                 goto err;

-        reservation_object_add_excl_fence(obj->base.resv, &stub->dma);
+        dma_resv_add_excl_fence(obj->base.resv, &stub->dma);

         return &stub->dma;

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c

@@ -339,7 +339,7 @@ err:
                  */
                 if (!intel_gt_is_wedged(ggtt->vm.gt))
                         return VM_FAULT_SIGBUS;
-                /* else: fall through */
+                /* else, fall through */
         case -EAGAIN:
                 /*
                  * EAGAIN means the gpu is hung and we'll wait for the error

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c

@@ -140,7 +140,7 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
                 container_of(head, typeof(*obj), rcu);
         struct drm_i915_private *i915 = to_i915(obj->base.dev);

-        reservation_object_fini(&obj->base._resv);
+        dma_resv_fini(&obj->base._resv);
         i915_gem_object_free(obj);

         GEM_BUG_ON(!atomic_read(&i915->mm.free_count));

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h

@@ -99,22 +99,22 @@ i915_gem_object_put(struct drm_i915_gem_object *obj)
         __drm_gem_object_put(&obj->base);
 }

-#define assert_object_held(obj) reservation_object_assert_held((obj)->base.resv)
+#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

 static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
 {
-        reservation_object_lock(obj->base.resv, NULL);
+        dma_resv_lock(obj->base.resv, NULL);
 }

 static inline int
 i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
 {
-        return reservation_object_lock_interruptible(obj->base.resv, NULL);
+        return dma_resv_lock_interruptible(obj->base.resv, NULL);
 }

 static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
 {
-        reservation_object_unlock(obj->base.resv);
+        dma_resv_unlock(obj->base.resv);
 }

 struct dma_fence *
@@ -367,7 +367,7 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
         struct dma_fence *fence;

         rcu_read_lock();
-        fence = reservation_object_get_excl_rcu(obj->base.resv);
+        fence = dma_resv_get_excl_rcu(obj->base.resv);
         rcu_read_unlock();

         if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c

@@ -257,7 +257,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
         switch (type) {
         default:
                 MISSING_CASE(type);
-                /* fallthrough to use PAGE_KERNEL anyway */
+                /* fallthrough - to use PAGE_KERNEL anyway */
         case I915_MAP_WB:
                 pgprot = PAGE_KERNEL;
                 break;

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c

@@ -31,11 +31,10 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
 }

 static long
-i915_gem_object_wait_reservation(struct reservation_object *resv,
+i915_gem_object_wait_reservation(struct dma_resv *resv,
                                  unsigned int flags,
                                  long timeout)
 {
-        unsigned int seq = __read_seqcount_begin(&resv->seq);
         struct dma_fence *excl;
         bool prune_fences = false;
@@ -44,7 +43,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
                 unsigned int count, i;
                 int ret;

-                ret = reservation_object_get_fences_rcu(resv,
-                                                        &excl, &count, &shared);
+                ret = dma_resv_get_fences_rcu(resv,
+                                              &excl, &count, &shared);
                 if (ret)
                         return ret;
@@ -73,7 +72,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
                  */
                 prune_fences = count && timeout >= 0;
         } else {
-                excl = reservation_object_get_excl_rcu(resv);
+                excl = dma_resv_get_excl_rcu(resv);
         }

         if (excl && timeout >= 0)
@@ -83,15 +82,12 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,

         /*
          * Opportunistically prune the fences iff we know they have *all* been
-         * signaled and that the reservation object has not been changed (i.e.
-         * no new fences have been added).
+         * signaled.
          */
-        if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
-                if (reservation_object_trylock(resv)) {
-                        if (!__read_seqcount_retry(&resv->seq, seq))
-                                reservation_object_add_excl_fence(resv, NULL);
-                        reservation_object_unlock(resv);
-                }
+        if (prune_fences && dma_resv_trylock(resv)) {
+                if (dma_resv_test_signaled_rcu(resv, true))
+                        dma_resv_add_excl_fence(resv, NULL);
+                dma_resv_unlock(resv);
         }

         return timeout;
@@ -144,7 +140,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
                 unsigned int count, i;
                 int ret;

-                ret = reservation_object_get_fences_rcu(obj->base.resv,
-                                                        &excl, &count, &shared);
+                ret = dma_resv_get_fences_rcu(obj->base.resv,
+                                              &excl, &count, &shared);
                 if (ret)
                         return ret;
@@ -156,7 +152,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
                 kfree(shared);
         } else {
-                excl = reservation_object_get_excl_rcu(obj->base.resv);
+                excl = dma_resv_get_excl_rcu(obj->base.resv);
         }

         if (excl) {

diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c

@@ -20,7 +20,7 @@ static int igt_dmabuf_export(void *arg)
         if (IS_ERR(obj))
                 return PTR_ERR(obj);

-        dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+        dmabuf = i915_gem_prime_export(&obj->base, 0);
         i915_gem_object_put(obj);
         if (IS_ERR(dmabuf)) {
                 pr_err("i915_gem_prime_export failed with err=%d\n",
@@ -44,7 +44,7 @@ static int igt_dmabuf_import_self(void *arg)
         if (IS_ERR(obj))
                 return PTR_ERR(obj);

-        dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+        dmabuf = i915_gem_prime_export(&obj->base, 0);
         if (IS_ERR(dmabuf)) {
                 pr_err("i915_gem_prime_export failed with err=%d\n",
                        (int)PTR_ERR(dmabuf));
@@ -219,7 +219,7 @@ static int igt_dmabuf_export_vmap(void *arg)
         if (IS_ERR(obj))
                 return PTR_ERR(obj);

-        dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+        dmabuf = i915_gem_prime_export(&obj->base, 0);
         if (IS_ERR(dmabuf)) {
                 pr_err("i915_gem_prime_export failed with err=%d\n",
                        (int)PTR_ERR(dmabuf));
@@ -266,7 +266,7 @@ static int igt_dmabuf_export_kmap(void *arg)
         if (IS_ERR(obj))
                 return PTR_ERR(obj);

-        dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+        dmabuf = i915_gem_prime_export(&obj->base, 0);
         i915_gem_object_put(obj);
         if (IS_ERR(dmabuf)) {
                 err = PTR_ERR(dmabuf);