drm/i915: Pull i915_gem_active into the i915_active family
Looking forward, we need to break the struct_mutex dependency on
i915_gem_active. In the meantime, external use of i915_gem_active is
quite beguiling, little do new users suspect that it implies a barrier
as each request it tracks must be ordered wrt the previous one. As one
of many, it can be used to track activity across multiple timelines, a
shared fence, which fits our unordered request submission much better.
We need to steer external users away from the singular, exclusive fence
imposed by i915_gem_active to i915_active instead. As part of that
process, we move i915_gem_active out of i915_request.c into
i915_active.c to start separating the two concepts, and rename it to
i915_active_request (both to tie it to the concept of tracking just one
request, and to give it a longer, less appealing name).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190205130005.2807-5-chris@chris-wilson.co.uk
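For context, the two trackers being separated look roughly like this around
the time of this patch. This is a sketch based on i915_active_types.h of this
era, not an authoritative copy; field order and members may differ slightly:

/* Sketch: the single-request tracker vs the multi-timeline tracker */
struct i915_active_request {
	struct i915_request __rcu *request; /* the one request tracked */
	struct list_head link;              /* entry in rq->active_list */
	i915_active_retire_fn retire;       /* callback on retirement */
};

struct i915_active {
	struct drm_i915_private *i915;

	struct rb_root tree;             /* one active_node per timeline */
	struct i915_active_request last; /* cache of the most recent slot */
	unsigned int count;              /* number of slots still active */

	void (*retire)(struct i915_active *ref);
};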
@@ -21,7 +21,7 @@ static struct i915_global_active {
 } global;
 
 struct active_node {
-	struct i915_gem_active base;
+	struct i915_active_request base;
	struct i915_active *ref;
	struct rb_node node;
	u64 timeline;
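Each active_node is keyed by its timeline (the fence context id) and holds
one request slot for that timeline. The lookup lives in active_instance(),
outside the hunks shown here; a sketch of its rbtree walk, assuming the
usual variable names:

/* Sketch of the per-timeline lookup in active_instance(); idx is the
 * timeline (fence context) id being tracked. */
struct active_node *node;
struct rb_node **p, *parent;

parent = NULL;
p = &ref->tree.rb_node;
while (*p) {
	parent = *p;

	node = rb_entry(parent, struct active_node, node);
	if (node->timeline == idx)
		goto replace;	/* reuse the existing slot */

	if (node->timeline < idx)
		p = &parent->rb_right;
	else
		p = &parent->rb_left;
}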
@@ -33,7 +33,7 @@ __active_park(struct i915_active *ref)
	struct active_node *it, *n;
 
	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
-		GEM_BUG_ON(i915_gem_active_isset(&it->base));
+		GEM_BUG_ON(i915_active_request_isset(&it->base));
		kmem_cache_free(global.slab_cache, it);
	}
	ref->tree = RB_ROOT;
@@ -53,18 +53,18 @@ __active_retire(struct i915_active *ref)
 }
 
 static void
-node_retire(struct i915_gem_active *base, struct i915_request *rq)
+node_retire(struct i915_active_request *base, struct i915_request *rq)
 {
	__active_retire(container_of(base, struct active_node, base)->ref);
 }
 
 static void
-last_retire(struct i915_gem_active *base, struct i915_request *rq)
+last_retire(struct i915_active_request *base, struct i915_request *rq)
 {
	__active_retire(container_of(base, struct i915_active, last));
 }
 
-static struct i915_gem_active *
+static struct i915_active_request *
 active_instance(struct i915_active *ref, u64 idx)
 {
	struct active_node *node;
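Both retire callbacks above use container_of() to recover their owner and
funnel into __active_retire(), which drops one count per retired slot and
only signals the owner once the last slot retires. A sketch of that function
(it sits just above these hunks and is unchanged by this patch; exact body
is from memory, not the diff):

static void
__active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!ref->count);
	if (--ref->count)
		return;

	/* last tracked request retired: recycle the rbtree nodes... */
	__active_park(ref);

	/* ...and notify the owner that it is now idle */
	ref->retire(ref);
}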
@@ -85,7 +85,7 @@ active_instance(struct i915_active *ref, u64 idx)
	 * twice for the same timeline (as the older rbtree element will be
	 * retired before the new request added to last).
	 */
-	old = i915_gem_active_raw(&ref->last, BKL(ref));
+	old = i915_active_request_raw(&ref->last, BKL(ref));
	if (!old || old->fence.context == idx)
		goto out;
 
@@ -110,7 +110,7 @@ active_instance(struct i915_active *ref, u64 idx)
	node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
 
	/* kmalloc may retire the ref->last (thanks shrinker)! */
-	if (unlikely(!i915_gem_active_raw(&ref->last, BKL(ref)))) {
+	if (unlikely(!i915_active_request_raw(&ref->last, BKL(ref)))) {
		kmem_cache_free(global.slab_cache, node);
		goto out;
	}
@@ -118,7 +118,7 @@ active_instance(struct i915_active *ref, u64 idx)
	if (unlikely(!node))
		return ERR_PTR(-ENOMEM);
 
-	init_request_active(&node->base, node_retire);
+	i915_active_request_init(&node->base, NULL, node_retire);
	node->ref = ref;
	node->timeline = idx;
 
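The replacement helper differs from init_request_active() in taking an
explicit initial request (NULL here) alongside the retire callback. Its
definition in i915_active.h is approximately the following sketch; note
how a NULL retire callback is substituted with i915_active_retire_noop
(introduced at the end of this patch):

static inline void
i915_active_request_init(struct i915_active_request *active,
			 struct i915_request *rq,
			 i915_active_retire_fn retire)
{
	RCU_INIT_POINTER(active->request, rq);
	INIT_LIST_HEAD(&active->link);
	active->retire = retire ?: i915_active_retire_noop;
}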
@@ -133,7 +133,7 @@ replace:
	 * callback not two, and so much undo the active counting for the
	 * overwritten slot.
	 */
-	if (i915_gem_active_isset(&node->base)) {
+	if (i915_active_request_isset(&node->base)) {
		/* Retire ourselves from the old rq->active_list */
		__list_del_entry(&node->base.link);
		ref->count--;
@@ -154,7 +154,7 @@ void i915_active_init(struct drm_i915_private *i915,
	ref->i915 = i915;
	ref->retire = retire;
	ref->tree = RB_ROOT;
-	init_request_active(&ref->last, last_retire);
+	i915_active_request_init(&ref->last, NULL, last_retire);
	ref->count = 0;
 }
 
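Putting i915_active_init() together with the rest of the API, a caller's
lifecycle looks roughly like the hypothetical sketch below (my_retire and
my_tracker are illustrative names, not from this patch):

/* Hypothetical usage sketch of the i915_active API after this patch */
static void my_retire(struct i915_active *ref)
{
	/* everything tracked has retired; release resources here */
}

	struct i915_active my_tracker;
	int err;

	i915_active_init(i915, &my_tracker, my_retire);

	/* track rq on its own timeline; requests on different
	 * timelines are not ordered against each other */
	err = i915_active_ref(&my_tracker, rq->fence.context, rq);

	/* flush: wait for all tracked requests to retire */
	err = i915_active_wait(&my_tracker);

	i915_active_fini(&my_tracker); /* debug-build sanity checks only */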
@@ -162,15 +162,15 @@ int i915_active_ref(struct i915_active *ref,
		    u64 timeline,
		    struct i915_request *rq)
 {
-	struct i915_gem_active *active;
+	struct i915_active_request *active;
 
	active = active_instance(ref, timeline);
	if (IS_ERR(active))
		return PTR_ERR(active);
 
-	if (!i915_gem_active_isset(active))
+	if (!i915_active_request_isset(active))
		ref->count++;
-	i915_gem_active_set(active, rq);
+	__i915_active_request_set(active, rq);
 
	GEM_BUG_ON(!ref->count);
	return 0;
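Note the switch from i915_gem_active_set() to the double-underscore
__i915_active_request_set(): the raw setter simply moves the tracker onto
the new request without awaiting the previous one, which is safe here
because active_instance() has already serialised per-timeline slots. Its
approximate shape (renamed from __i915_gem_active_set; a sketch, not
quoted from this diff):

static inline void
__i915_active_request_set(struct i915_active_request *active,
			  struct i915_request *request)
{
	list_move(&active->link, &request->active_list);
	rcu_assign_pointer(active->request, request);
}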
@@ -196,12 +196,12 @@ int i915_active_wait(struct i915_active *ref)
	if (i915_active_acquire(ref))
		goto out_release;
 
-	ret = i915_gem_active_retire(&ref->last, BKL(ref));
+	ret = i915_active_request_retire(&ref->last, BKL(ref));
	if (ret)
		goto out_release;
 
	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
-		ret = i915_gem_active_retire(&it->base, BKL(ref));
+		ret = i915_active_request_retire(&it->base, BKL(ref));
		if (ret)
			break;
	}
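i915_active_request_retire() is the renamed i915_gem_active_retire(): it
waits for the tracked request under the given lock (BKL(ref) is
struct_mutex here) and then fires the retire callback by hand. A sketch of
its shape, following the old helper; the exact wait flags and list handling
are from memory, not this diff:

static inline int
i915_active_request_retire(struct i915_active_request *active,
			   struct mutex *mutex)
{
	struct i915_request *request;
	long ret;

	request = i915_active_request_raw(active, mutex);
	if (!request)
		return 0;

	ret = i915_request_wait(request,
				I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
				MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	list_del_init(&active->link);
	RCU_INIT_POINTER(active->request, NULL);

	active->retire(active, request);
	return 0;
}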
@@ -211,11 +211,11 @@ out_release:
	return ret;
 }
 
-static int __i915_request_await_active(struct i915_request *rq,
-				       struct i915_gem_active *active)
+int i915_request_await_active_request(struct i915_request *rq,
+				      struct i915_active_request *active)
 {
	struct i915_request *barrier =
-		i915_gem_active_raw(active, &rq->i915->drm.struct_mutex);
+		i915_active_request_raw(active, &rq->i915->drm.struct_mutex);
 
	return barrier ? i915_request_await_dma_fence(rq, &barrier->fence) : 0;
 }
@@ -225,12 +225,12 @@ int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
	struct active_node *it, *n;
	int ret;
 
-	ret = __i915_request_await_active(rq, &ref->last);
+	ret = i915_request_await_active_request(rq, &ref->last);
	if (ret)
		return ret;
 
	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
-		ret = __i915_request_await_active(rq, &it->base);
+		ret = i915_request_await_active_request(rq, &it->base);
		if (ret)
			return ret;
	}
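A typical caller uses i915_request_await_active() to order a new request
after all activity already tracked for an object, across every timeline in
the tree. A hypothetical caller (vma->active being one real instance of an
i915_active introduced earlier in this series):

	/* order rq after all activity tracked for this vma */
	err = i915_request_await_active(rq, &vma->active);
	if (err)
		return err;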
@@ -241,12 +241,32 @@ int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
 void i915_active_fini(struct i915_active *ref)
 {
-	GEM_BUG_ON(i915_gem_active_isset(&ref->last));
+	GEM_BUG_ON(i915_active_request_isset(&ref->last));
	GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
	GEM_BUG_ON(ref->count);
 }
 #endif
 
+int i915_active_request_set(struct i915_active_request *active,
+			    struct i915_request *rq)
+{
+	int err;
+
+	/* Must maintain ordering wrt previous active requests */
+	err = i915_request_await_active_request(rq, active);
+	if (err)
+		return err;
+
+	__i915_active_request_set(active, rq);
+	return 0;
+}
+
+void i915_active_retire_noop(struct i915_active_request *active,
+			     struct i915_request *request)
+{
+	/* Space left intentionally blank */
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/i915_active.c"
 #endif
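The newly exported i915_active_request_set() is exactly the implicit
barrier the commit message warns about, now made explicit: it first awaits
the request currently in the slot, then installs the new one, so
consecutive users of a single i915_active_request are always serialised.
Contrast the two setters in this hypothetical caller (tl is an illustrative
pointer to a struct with an i915_active_request member, modelled on the
timeline's last_request):

	/* ordered: rq is first made to await the request currently
	 * tracked in the slot (the barrier), then installed */
	err = i915_active_request_set(&tl->last_request, rq);
	if (err)
		return err;

	/* unordered: the raw setter simply overwrites the slot */
	__i915_active_request_set(&tl->last_request, rq);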