dma-buf: Rename struct fence to dma_fence
I plan to usurp the short name of struct fence for a core kernel struct,
and so I need to rename the specialised fence/timeline for DMA operations
to make room.

A consensus was reached in
https://lists.freedesktop.org/archives/dri-devel/2016-July/113083.html
that making clear this fence applies to DMA operations was a good thing.
Since then the patch has grown a bit as usage increases, so hopefully it
remains a good thing!

(v2...: rebase, rerun spatch)
v3: Compile on msm, spotted a manual fixup that I broke.
v4: Try again for msm, sorry Daniel

coccinelle script:

@@
@@
- struct fence
+ struct dma_fence
@@
@@
- struct fence_ops
+ struct dma_fence_ops
@@
@@
- struct fence_cb
+ struct dma_fence_cb
@@
@@
- struct fence_array
+ struct dma_fence_array
@@
@@
- enum fence_flag_bits
+ enum dma_fence_flag_bits
@@
@@
(
- fence_init
+ dma_fence_init
|
- fence_release
+ dma_fence_release
|
- fence_free
+ dma_fence_free
|
- fence_get
+ dma_fence_get
|
- fence_get_rcu
+ dma_fence_get_rcu
|
- fence_put
+ dma_fence_put
|
- fence_signal
+ dma_fence_signal
|
- fence_signal_locked
+ dma_fence_signal_locked
|
- fence_default_wait
+ dma_fence_default_wait
|
- fence_add_callback
+ dma_fence_add_callback
|
- fence_remove_callback
+ dma_fence_remove_callback
|
- fence_enable_sw_signaling
+ dma_fence_enable_sw_signaling
|
- fence_is_signaled_locked
+ dma_fence_is_signaled_locked
|
- fence_is_signaled
+ dma_fence_is_signaled
|
- fence_is_later
+ dma_fence_is_later
|
- fence_later
+ dma_fence_later
|
- fence_wait_timeout
+ dma_fence_wait_timeout
|
- fence_wait_any_timeout
+ dma_fence_wait_any_timeout
|
- fence_wait
+ dma_fence_wait
|
- fence_context_alloc
+ dma_fence_context_alloc
|
- fence_array_create
+ dma_fence_array_create
|
- to_fence_array
+ to_dma_fence_array
|
- fence_is_array
+ dma_fence_is_array
|
- trace_fence_emit
+ trace_dma_fence_emit
|
- FENCE_TRACE
+ DMA_FENCE_TRACE
|
- FENCE_WARN
+ DMA_FENCE_WARN
|
- FENCE_ERR
+ DMA_FENCE_ERR
)
(
...
)

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/20161025120045.28839-1-chris@chris-wilson.co.uk
committed by Daniel Vetter
parent 0fc4f78f44
commit f54d186700
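For illustration only (not part of the patch below): a minimal, hypothetical sketch of what a caller looks like after the rename, using the post-rename primitives that the coccinelle script produces (dma_fence_get(), dma_fence_is_signaled(), dma_fence_wait_timeout(), dma_fence_put()). The helper name example_wait_on_fence() and the one-second timeout are invented for the example.

/*
 * Hypothetical sketch (not from this patch): waiting on a fence with the
 * renamed API from <linux/dma-fence.h>.
 */
#include <linux/dma-fence.h>
#include <linux/jiffies.h>

static long example_wait_on_fence(struct dma_fence *fence)
{
	long ret;

	if (!fence)
		return 0;

	/* Hold a reference for as long as we look at the fence. */
	dma_fence_get(fence);

	if (dma_fence_is_signaled(fence)) {
		dma_fence_put(fence);
		return 1;
	}

	/* Interruptible wait, bounded to one second. */
	ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(1000));

	dma_fence_put(fence);
	return ret;
}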
@@ -102,17 +102,17 @@ EXPORT_SYMBOL(reservation_object_reserve_shared);
 static void
 reservation_object_add_shared_inplace(struct reservation_object *obj,
                                       struct reservation_object_list *fobj,
-                                      struct fence *fence)
+                                      struct dma_fence *fence)
 {
         u32 i;
 
-        fence_get(fence);
+        dma_fence_get(fence);
 
         preempt_disable();
         write_seqcount_begin(&obj->seq);
 
         for (i = 0; i < fobj->shared_count; ++i) {
-                struct fence *old_fence;
+                struct dma_fence *old_fence;
 
                 old_fence = rcu_dereference_protected(fobj->shared[i],
                                                 reservation_object_held(obj));
@@ -123,7 +123,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj,
                         write_seqcount_end(&obj->seq);
                         preempt_enable();
 
-                        fence_put(old_fence);
+                        dma_fence_put(old_fence);
                         return;
                 }
         }
@@ -143,12 +143,12 @@ static void
 reservation_object_add_shared_replace(struct reservation_object *obj,
                                       struct reservation_object_list *old,
                                       struct reservation_object_list *fobj,
-                                      struct fence *fence)
+                                      struct dma_fence *fence)
 {
         unsigned i;
-        struct fence *old_fence = NULL;
+        struct dma_fence *old_fence = NULL;
 
-        fence_get(fence);
+        dma_fence_get(fence);
 
         if (!old) {
                 RCU_INIT_POINTER(fobj->shared[0], fence);
@@ -165,7 +165,7 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
         fobj->shared_count = old->shared_count;
 
         for (i = 0; i < old->shared_count; ++i) {
-                struct fence *check;
+                struct dma_fence *check;
 
                 check = rcu_dereference_protected(old->shared[i],
                                                 reservation_object_held(obj));
@@ -196,7 +196,7 @@ done:
         kfree_rcu(old, rcu);
 
         if (old_fence)
-                fence_put(old_fence);
+                dma_fence_put(old_fence);
 }
 
 /**
@@ -208,7 +208,7 @@ done:
  * reservation_object_reserve_shared() has been called.
  */
 void reservation_object_add_shared_fence(struct reservation_object *obj,
-                                         struct fence *fence)
+                                         struct dma_fence *fence)
 {
         struct reservation_object_list *old, *fobj = obj->staged;
 
@@ -231,9 +231,9 @@ EXPORT_SYMBOL(reservation_object_add_shared_fence);
  * Add a fence to the exclusive slot. The obj->lock must be held.
  */
 void reservation_object_add_excl_fence(struct reservation_object *obj,
-                                       struct fence *fence)
+                                       struct dma_fence *fence)
 {
-        struct fence *old_fence = reservation_object_get_excl(obj);
+        struct dma_fence *old_fence = reservation_object_get_excl(obj);
         struct reservation_object_list *old;
         u32 i = 0;
 
@@ -242,7 +242,7 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
                 i = old->shared_count;
 
         if (fence)
-                fence_get(fence);
+                dma_fence_get(fence);
 
         preempt_disable();
         write_seqcount_begin(&obj->seq);
@@ -255,11 +255,11 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
 
         /* inplace update, no shared fences */
         while (i--)
-                fence_put(rcu_dereference_protected(old->shared[i],
+                dma_fence_put(rcu_dereference_protected(old->shared[i],
                                                 reservation_object_held(obj)));
 
         if (old_fence)
-                fence_put(old_fence);
+                dma_fence_put(old_fence);
 }
 EXPORT_SYMBOL(reservation_object_add_excl_fence);
 
@@ -276,12 +276,12 @@ EXPORT_SYMBOL(reservation_object_add_excl_fence);
  * Zero or -errno
  */
 int reservation_object_get_fences_rcu(struct reservation_object *obj,
-                                      struct fence **pfence_excl,
+                                      struct dma_fence **pfence_excl,
                                       unsigned *pshared_count,
-                                      struct fence ***pshared)
+                                      struct dma_fence ***pshared)
 {
-        struct fence **shared = NULL;
-        struct fence *fence_excl;
+        struct dma_fence **shared = NULL;
+        struct dma_fence *fence_excl;
         unsigned int shared_count;
         int ret = 1;
 
@@ -296,12 +296,12 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
                 seq = read_seqcount_begin(&obj->seq);
 
                 fence_excl = rcu_dereference(obj->fence_excl);
-                if (fence_excl && !fence_get_rcu(fence_excl))
+                if (fence_excl && !dma_fence_get_rcu(fence_excl))
                         goto unlock;
 
                 fobj = rcu_dereference(obj->fence);
                 if (fobj) {
-                        struct fence **nshared;
+                        struct dma_fence **nshared;
                         size_t sz = sizeof(*shared) * fobj->shared_max;
 
                         nshared = krealloc(shared, sz,
@@ -322,15 +322,15 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
 
                 for (i = 0; i < shared_count; ++i) {
                         shared[i] = rcu_dereference(fobj->shared[i]);
-                        if (!fence_get_rcu(shared[i]))
+                        if (!dma_fence_get_rcu(shared[i]))
                                 break;
                 }
         }
 
         if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
                 while (i--)
-                        fence_put(shared[i]);
-                fence_put(fence_excl);
+                        dma_fence_put(shared[i]);
+                dma_fence_put(fence_excl);
                 goto unlock;
         }
 
@@ -368,7 +368,7 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
                                          bool wait_all, bool intr,
                                          unsigned long timeout)
 {
-        struct fence *fence;
+        struct dma_fence *fence;
         unsigned seq, shared_count, i = 0;
         long ret = timeout;
 
@@ -389,16 +389,17 @@ retry:
                 shared_count = fobj->shared_count;
 
                 for (i = 0; i < shared_count; ++i) {
-                        struct fence *lfence = rcu_dereference(fobj->shared[i]);
+                        struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
 
-                        if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags))
+                        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+                                     &lfence->flags))
                                 continue;
 
-                        if (!fence_get_rcu(lfence))
+                        if (!dma_fence_get_rcu(lfence))
                                 goto unlock_retry;
 
-                        if (fence_is_signaled(lfence)) {
-                                fence_put(lfence);
+                        if (dma_fence_is_signaled(lfence)) {
+                                dma_fence_put(lfence);
                                 continue;
                         }
 
@@ -408,15 +409,16 @@ retry:
         }
 
         if (!shared_count) {
-                struct fence *fence_excl = rcu_dereference(obj->fence_excl);
+                struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
 
                 if (fence_excl &&
-                    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
-                        if (!fence_get_rcu(fence_excl))
+                    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+                              &fence_excl->flags)) {
+                        if (!dma_fence_get_rcu(fence_excl))
                                 goto unlock_retry;
 
-                        if (fence_is_signaled(fence_excl))
-                                fence_put(fence_excl);
+                        if (dma_fence_is_signaled(fence_excl))
+                                dma_fence_put(fence_excl);
                         else
                                 fence = fence_excl;
                 }
@@ -425,12 +427,12 @@ retry:
         rcu_read_unlock();
         if (fence) {
                 if (read_seqcount_retry(&obj->seq, seq)) {
-                        fence_put(fence);
+                        dma_fence_put(fence);
                         goto retry;
                 }
 
-                ret = fence_wait_timeout(fence, intr, ret);
-                fence_put(fence);
+                ret = dma_fence_wait_timeout(fence, intr, ret);
+                dma_fence_put(fence);
                 if (ret > 0 && wait_all && (i + 1 < shared_count))
                         goto retry;
         }
@@ -444,18 +446,18 @@ EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
 
 
 static inline int
-reservation_object_test_signaled_single(struct fence *passed_fence)
+reservation_object_test_signaled_single(struct dma_fence *passed_fence)
 {
-        struct fence *fence, *lfence = passed_fence;
+        struct dma_fence *fence, *lfence = passed_fence;
         int ret = 1;
 
-        if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
-                fence = fence_get_rcu(lfence);
+        if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
+                fence = dma_fence_get_rcu(lfence);
                 if (!fence)
                         return -1;
 
-                ret = !!fence_is_signaled(fence);
-                fence_put(fence);
+                ret = !!dma_fence_is_signaled(fence);
+                dma_fence_put(fence);
         }
         return ret;
 }
@@ -492,7 +494,7 @@ retry:
                 shared_count = fobj->shared_count;
 
                 for (i = 0; i < shared_count; ++i) {
-                        struct fence *fence = rcu_dereference(fobj->shared[i]);
+                        struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
 
                         ret = reservation_object_test_signaled_single(fence);
                         if (ret < 0)
@@ -506,7 +508,7 @@ retry:
         }
 
         if (!shared_count) {
-                struct fence *fence_excl = rcu_dereference(obj->fence_excl);
+                struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
 
                 if (fence_excl) {
                         ret = reservation_object_test_signaled_single(
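The prototypes renamed in the hunks above change every caller that attaches fences to a reservation object. Below is a minimal, hypothetical sketch of such a caller, assuming the post-rename signatures shown in this diff plus the single-argument reservation_object_reserve_shared() of this kernel era; the helper name example_attach_shared_fence() is invented and is not part of this patch.

/*
 * Hypothetical caller sketch (not from this patch): publishing a shared
 * dma_fence on a reservation object while holding obj->lock, as
 * reservation_object_add_shared_fence() requires.
 */
#include <linux/reservation.h>
#include <linux/dma-fence.h>

static int example_attach_shared_fence(struct reservation_object *resv,
                                       struct dma_fence *fence)
{
	int ret;

	ret = ww_mutex_lock(&resv->lock, NULL);
	if (ret)
		return ret;

	/* Make room for one more shared slot before publishing the fence. */
	ret = reservation_object_reserve_shared(resv);
	if (!ret)
		reservation_object_add_shared_fence(resv, fence);

	ww_mutex_unlock(&resv->lock);
	return ret;
}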