dma-buf: Rename struct fence to dma_fence
I plan to usurp the short name of struct fence for a core kernel struct,
and so I need to rename the specialised fence/timeline for DMA
operations to make room.

A consensus was reached in
https://lists.freedesktop.org/archives/dri-devel/2016-July/113083.html
that making clear this fence applies to DMA operations was a good thing.
Since then the patch has grown a bit as usage increases, so hopefully it
remains a good thing!

(v2...: rebase, rerun spatch)
v3: Compile on msm, spotted a manual fixup that I broke.
v4: Try again for msm, sorry Daniel

coccinelle script:
@@
@@
- struct fence
+ struct dma_fence
@@
@@
- struct fence_ops
+ struct dma_fence_ops
@@
@@
- struct fence_cb
+ struct dma_fence_cb
@@
@@
- struct fence_array
+ struct dma_fence_array
@@
@@
- enum fence_flag_bits
+ enum dma_fence_flag_bits
@@
@@
(
- fence_init
+ dma_fence_init
|
- fence_release
+ dma_fence_release
|
- fence_free
+ dma_fence_free
|
- fence_get
+ dma_fence_get
|
- fence_get_rcu
+ dma_fence_get_rcu
|
- fence_put
+ dma_fence_put
|
- fence_signal
+ dma_fence_signal
|
- fence_signal_locked
+ dma_fence_signal_locked
|
- fence_default_wait
+ dma_fence_default_wait
|
- fence_add_callback
+ dma_fence_add_callback
|
- fence_remove_callback
+ dma_fence_remove_callback
|
- fence_enable_sw_signaling
+ dma_fence_enable_sw_signaling
|
- fence_is_signaled_locked
+ dma_fence_is_signaled_locked
|
- fence_is_signaled
+ dma_fence_is_signaled
|
- fence_is_later
+ dma_fence_is_later
|
- fence_later
+ dma_fence_later
|
- fence_wait_timeout
+ dma_fence_wait_timeout
|
- fence_wait_any_timeout
+ dma_fence_wait_any_timeout
|
- fence_wait
+ dma_fence_wait
|
- fence_context_alloc
+ dma_fence_context_alloc
|
- fence_array_create
+ dma_fence_array_create
|
- to_fence_array
+ to_dma_fence_array
|
- fence_is_array
+ dma_fence_is_array
|
- trace_fence_emit
+ trace_dma_fence_emit
|
- FENCE_TRACE
+ DMA_FENCE_TRACE
|
- FENCE_WARN
+ DMA_FENCE_WARN
|
- FENCE_ERR
+ DMA_FENCE_ERR
)
 (
 ...
 )

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/20161025120045.28839-1-chris@chris-wilson.co.uk
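For reference, a semantic patch like the one in the commit message is applied with Coccinelle's spatch tool; "rerun spatch" above refers to regenerating the mechanical bulk of the patch this way after each rebase, leaving only the manual fixups (such as the <linux/fence.h> to <linux/dma-fence.h> include rename in the diff below) to redo by hand. A minimal sketch of the invocation, assuming the rules are saved to a hypothetical fence-rename.cocci:

    # Apply the semantic patch recursively under drivers/, rewriting
    # matched files in place (fence-rename.cocci is a hypothetical name).
    spatch --sp-file fence-rename.cocci --in-place --dir drivers/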
drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -466,10 +466,10 @@ int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
 }
 
 #ifdef CONFIG_DEBUG_FS
-static void etnaviv_gem_describe_fence(struct fence *fence,
+static void etnaviv_gem_describe_fence(struct dma_fence *fence,
         const char *type, struct seq_file *m)
 {
-        if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+        if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                 seq_printf(m, "\t%9s: %s %s seq %u\n",
                            type,
                            fence->ops->get_driver_name(fence),
@@ -482,7 +482,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
         struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
         struct reservation_object *robj = etnaviv_obj->resv;
         struct reservation_object_list *fobj;
-        struct fence *fence;
+        struct dma_fence *fence;
         unsigned long off = drm_vma_node_start(&obj->vma_node);
 
         seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -15,7 +15,7 @@
  */
 
 #include <linux/component.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 #include <linux/moduleparam.h>
 #include <linux/of_device.h>
 #include "etnaviv_dump.h"
@@ -882,7 +882,7 @@ static void recover_worker(struct work_struct *work)
         for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
                 if (!gpu->event[i].used)
                         continue;
-                fence_signal(gpu->event[i].fence);
+                dma_fence_signal(gpu->event[i].fence);
                 gpu->event[i].fence = NULL;
                 gpu->event[i].used = false;
                 complete(&gpu->event_free);
@@ -952,55 +952,55 @@ static void hangcheck_disable(struct etnaviv_gpu *gpu)
 /* fence object management */
 struct etnaviv_fence {
         struct etnaviv_gpu *gpu;
-        struct fence base;
+        struct dma_fence base;
 };
 
-static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence)
+static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence)
 {
         return container_of(fence, struct etnaviv_fence, base);
 }
 
-static const char *etnaviv_fence_get_driver_name(struct fence *fence)
+static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence)
 {
         return "etnaviv";
 }
 
-static const char *etnaviv_fence_get_timeline_name(struct fence *fence)
+static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
 {
         struct etnaviv_fence *f = to_etnaviv_fence(fence);
 
         return dev_name(f->gpu->dev);
 }
 
-static bool etnaviv_fence_enable_signaling(struct fence *fence)
+static bool etnaviv_fence_enable_signaling(struct dma_fence *fence)
 {
         return true;
 }
 
-static bool etnaviv_fence_signaled(struct fence *fence)
+static bool etnaviv_fence_signaled(struct dma_fence *fence)
 {
         struct etnaviv_fence *f = to_etnaviv_fence(fence);
 
         return fence_completed(f->gpu, f->base.seqno);
 }
 
-static void etnaviv_fence_release(struct fence *fence)
+static void etnaviv_fence_release(struct dma_fence *fence)
 {
         struct etnaviv_fence *f = to_etnaviv_fence(fence);
 
         kfree_rcu(f, base.rcu);
 }
 
-static const struct fence_ops etnaviv_fence_ops = {
+static const struct dma_fence_ops etnaviv_fence_ops = {
         .get_driver_name = etnaviv_fence_get_driver_name,
         .get_timeline_name = etnaviv_fence_get_timeline_name,
         .enable_signaling = etnaviv_fence_enable_signaling,
         .signaled = etnaviv_fence_signaled,
-        .wait = fence_default_wait,
+        .wait = dma_fence_default_wait,
         .release = etnaviv_fence_release,
 };
 
-static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
+static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
 {
         struct etnaviv_fence *f;
 
@@ -1010,8 +1010,8 @@ static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
 
         f->gpu = gpu;
 
-        fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
-                   gpu->fence_context, ++gpu->next_fence);
+        dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
+                       gpu->fence_context, ++gpu->next_fence);
 
         return &f->base;
 }
@@ -1021,7 +1021,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
 {
         struct reservation_object *robj = etnaviv_obj->resv;
         struct reservation_object_list *fobj;
-        struct fence *fence;
+        struct dma_fence *fence;
         int i, ret;
 
         if (!exclusive) {
@@ -1039,7 +1039,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
         /* Wait on any existing exclusive fence which isn't our own */
         fence = reservation_object_get_excl(robj);
         if (fence && fence->context != context) {
-                ret = fence_wait(fence, true);
+                ret = dma_fence_wait(fence, true);
                 if (ret)
                         return ret;
         }
@@ -1052,7 +1052,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
                 fence = rcu_dereference_protected(fobj->shared[i],
                                                 reservation_object_held(robj));
                 if (fence->context != context) {
-                        ret = fence_wait(fence, true);
+                        ret = dma_fence_wait(fence, true);
                         if (ret)
                                 return ret;
                 }
@@ -1158,11 +1158,11 @@ static void retire_worker(struct work_struct *work)
 
         mutex_lock(&gpu->lock);
         list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
-                if (!fence_is_signaled(cmdbuf->fence))
+                if (!dma_fence_is_signaled(cmdbuf->fence))
                         break;
 
                 list_del(&cmdbuf->node);
-                fence_put(cmdbuf->fence);
+                dma_fence_put(cmdbuf->fence);
 
                 for (i = 0; i < cmdbuf->nr_bos; i++) {
                         struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i];
@@ -1275,7 +1275,7 @@ void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
 int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
         struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
 {
-        struct fence *fence;
+        struct dma_fence *fence;
         unsigned int event, i;
         int ret;
 
@@ -1391,7 +1391,7 @@ static irqreturn_t irq_handler(int irq, void *data)
         }
 
         while ((event = ffs(intr)) != 0) {
-                struct fence *fence;
+                struct dma_fence *fence;
 
                 event -= 1;
 
@@ -1401,7 +1401,7 @@ static irqreturn_t irq_handler(int irq, void *data)
 
                 fence = gpu->event[event].fence;
                 gpu->event[event].fence = NULL;
-                fence_signal(fence);
+                dma_fence_signal(fence);
 
                 /*
                  * Events can be processed out of order. Eg,
@@ -1553,7 +1553,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
                 return ret;
 
         gpu->drm = drm;
-        gpu->fence_context = fence_context_alloc(1);
+        gpu->fence_context = dma_fence_context_alloc(1);
         spin_lock_init(&gpu->fence_spinlock);
 
         INIT_LIST_HEAD(&gpu->active_cmd_list);
drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -89,7 +89,7 @@ struct etnaviv_chip_identity {
 
 struct etnaviv_event {
         bool used;
-        struct fence *fence;
+        struct dma_fence *fence;
 };
 
 struct etnaviv_cmdbuf;
@@ -163,7 +163,7 @@ struct etnaviv_cmdbuf {
         /* vram node used if the cmdbuf is mapped through the MMUv2 */
         struct drm_mm_node vram_node;
         /* fence after which this buffer is to be disposed */
-        struct fence *fence;
+        struct dma_fence *fence;
         /* target exec state */
         u32 exec_state;
         /* per GPU in-flight list */