
[ 236.821534] WARNING: kmemcheck: Caught 64-bit read from uninitialized memory (ffff8802538683d0) [ 236.828642] 420000001e7f0000000000000000000000080000000000000000000000000000 [ 236.839543] i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u [ 236.850420] ^ [ 236.854123] RIP: 0010:[<ffffffff81396f07>] [<ffffffff81396f07>] fence_signal+0x17/0xd0 [ 236.861313] RSP: 0018:ffff88024acd7ba0 EFLAGS: 00010282 [ 236.865027] RAX: ffffffff812f6a90 RBX: ffff8802527ca800 RCX: ffff880252cb30e0 [ 236.868801] RDX: ffff88024ac5d918 RSI: ffff880252f780e0 RDI: ffff880253868380 [ 236.872579] RBP: ffff88024acd7bc0 R08: ffff88024acd7be0 R09: 0000000000000000 [ 236.876407] R10: 0000000000000000 R11: 0000000000000000 R12: ffff880253868380 [ 236.880185] R13: ffff8802538684d0 R14: ffff880253868380 R15: ffff88024cd48e00 [ 236.883983] FS: 00007f1646d1a740(0000) GS:ffff88025d000000(0000) knlGS:0000000000000000 [ 236.890959] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 236.894702] CR2: ffff880251360318 CR3: 000000024ad21000 CR4: 00000000001406f0 [ 236.898481] [<ffffffff8130d1ad>] i915_gem_request_retire+0x1cd/0x230 [ 236.902439] [<ffffffff8130e2b3>] i915_gem_request_alloc+0xa3/0x2f0 [ 236.906435] [<ffffffff812fb1bd>] i915_gem_do_execbuffer.isra.41+0xb6d/0x18b0 [ 236.910434] [<ffffffff812fc265>] i915_gem_execbuffer2+0x95/0x1e0 [ 236.914390] [<ffffffff812ad625>] drm_ioctl+0x1e5/0x460 [ 236.918275] [<ffffffff8110d4cf>] do_vfs_ioctl+0x8f/0x5c0 [ 236.922168] [<ffffffff8110da3c>] SyS_ioctl+0x3c/0x70 [ 236.926090] [<ffffffff814b7a5f>] entry_SYSCALL_64_fastpath+0x17/0x93 [ 236.930045] [<ffffffffffffffff>] 0xffffffffffffffff We only set the timestamp before we mark the fence as signaled. It is done before to avoid observers having a window in which they may see the fence as complete but no timestamp. Having it does incur a potential for the timestamp to be written twice, and even for it to be corrupted if the u64 write is not atomic. 
Instead use a new bit to record the presence of the timestamp, and teach the readers to wait until it is set if the fence is complete. There still remains a race where the timestamp for the signaled fence may be shown before the fence is reported as signaled, but that's a pre-existing error. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Cc: Sumit Semwal <sumit.semwal@linaro.org> Cc: Gustavo Padovan <gustavo@padovan.org> Cc: Daniel Vetter <daniel.vetter@intel.com> Reported-by: Rafael Antognolli <rafael.antognolli@intel.com> Signed-off-by: Gustavo Padovan <gustavo.padovan@collabora.com> Link: http://patchwork.freedesktop.org/patch/msgid/20170214124001.1930-1-chris@chris-wilson.co.uk
237 lines
5.8 KiB
C
237 lines
5.8 KiB
C
/*
|
|
* Sync File validation framework and debug information
|
|
*
|
|
* Copyright (C) 2012 Google, Inc.
|
|
*
|
|
* This software is licensed under the terms of the GNU General Public
|
|
* License version 2, as published by the Free Software Foundation, and
|
|
* may be copied, distributed, and modified under those terms.
|
|
*
|
|
* This program is distributed in the hope that it will be useful,
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
* GNU General Public License for more details.
|
|
*
|
|
*/
|
|
|
|
#include <linux/debugfs.h>
|
|
#include "sync_debug.h"
|
|
|
|
/* Root debugfs directory ("sync"), created once at late_initcall time. */
static struct dentry *dbgfs;

/* All registered sync_timelines; guarded by sync_timeline_list_lock. */
static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

/* All registered sync_files; guarded by sync_file_list_lock. */
static LIST_HEAD(sync_file_list_head);
static DEFINE_SPINLOCK(sync_file_list_lock);
|
void sync_timeline_debug_add(struct sync_timeline *obj)
|
|
{
|
|
unsigned long flags;
|
|
|
|
spin_lock_irqsave(&sync_timeline_list_lock, flags);
|
|
list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
|
|
spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
|
|
}
|
|
|
|
void sync_timeline_debug_remove(struct sync_timeline *obj)
|
|
{
|
|
unsigned long flags;
|
|
|
|
spin_lock_irqsave(&sync_timeline_list_lock, flags);
|
|
list_del(&obj->sync_timeline_list);
|
|
spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
|
|
}
|
|
|
|
void sync_file_debug_add(struct sync_file *sync_file)
|
|
{
|
|
unsigned long flags;
|
|
|
|
spin_lock_irqsave(&sync_file_list_lock, flags);
|
|
list_add_tail(&sync_file->sync_file_list, &sync_file_list_head);
|
|
spin_unlock_irqrestore(&sync_file_list_lock, flags);
|
|
}
|
|
|
|
void sync_file_debug_remove(struct sync_file *sync_file)
|
|
{
|
|
unsigned long flags;
|
|
|
|
spin_lock_irqsave(&sync_file_list_lock, flags);
|
|
list_del(&sync_file->sync_file_list);
|
|
spin_unlock_irqrestore(&sync_file_list_lock, flags);
|
|
}
|
|
|
|
/*
 * Map a fence status code to a human-readable label:
 * negative -> "error", positive -> "signaled", zero -> "active".
 */
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";

	return status < 0 ? "error" : "active";
}
|
|
|
|
/*
 * Emit a one-line description of @fence: its status, the signal
 * timestamp when DMA_FENCE_FLAG_TIMESTAMP_BIT is set, and the driver's
 * fence/timeline value strings when both callbacks are provided.
 * @show selects whether the parent timeline name prefixes the line.
 */
static void sync_print_fence(struct seq_file *s,
			     struct dma_fence *fence, bool show)
{
	struct sync_timeline *parent = dma_fence_parent(fence);
	int status = dma_fence_get_status_locked(fence);

	seq_printf(s, " %s%sfence %s",
		   show ? parent->name : "",
		   show ? "_" : "",
		   sync_status_str(status));

	/* Only print a timestamp once the signaler has recorded one. */
	if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags)) {
		struct timespec64 ts64 =
			ktime_to_timespec64(fence->timestamp);

		seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
	}

	if (fence->ops->timeline_value_str && fence->ops->fence_value_str) {
		char value[64];

		fence->ops->fence_value_str(fence, value, sizeof(value));
		/* An empty string means the driver had nothing to report. */
		if (value[0] != '\0') {
			seq_printf(s, ": %s", value);

			fence->ops->timeline_value_str(fence, value,
						       sizeof(value));
			if (value[0] != '\0')
				seq_printf(s, " / %s", value);
		}
	}

	seq_puts(s, "\n");
}
|
|
|
|
static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
|
|
{
|
|
struct list_head *pos;
|
|
unsigned long flags;
|
|
|
|
seq_printf(s, "%s: %d\n", obj->name, obj->value);
|
|
|
|
spin_lock_irqsave(&obj->child_list_lock, flags);
|
|
list_for_each(pos, &obj->child_list_head) {
|
|
struct sync_pt *pt =
|
|
container_of(pos, struct sync_pt, child_list);
|
|
sync_print_fence(s, &pt->base, false);
|
|
}
|
|
spin_unlock_irqrestore(&obj->child_list_lock, flags);
|
|
}
|
|
|
|
static void sync_print_sync_file(struct seq_file *s,
|
|
struct sync_file *sync_file)
|
|
{
|
|
int i;
|
|
|
|
seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name,
|
|
sync_status_str(dma_fence_get_status(sync_file->fence)));
|
|
|
|
if (dma_fence_is_array(sync_file->fence)) {
|
|
struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
|
|
|
|
for (i = 0; i < array->num_fences; ++i)
|
|
sync_print_fence(s, array->fences[i], true);
|
|
} else {
|
|
sync_print_fence(s, sync_file->fence, true);
|
|
}
|
|
}
|
|
|
|
static int sync_debugfs_show(struct seq_file *s, void *unused)
|
|
{
|
|
unsigned long flags;
|
|
struct list_head *pos;
|
|
|
|
seq_puts(s, "objs:\n--------------\n");
|
|
|
|
spin_lock_irqsave(&sync_timeline_list_lock, flags);
|
|
list_for_each(pos, &sync_timeline_list_head) {
|
|
struct sync_timeline *obj =
|
|
container_of(pos, struct sync_timeline,
|
|
sync_timeline_list);
|
|
|
|
sync_print_obj(s, obj);
|
|
seq_puts(s, "\n");
|
|
}
|
|
spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
|
|
|
|
seq_puts(s, "fences:\n--------------\n");
|
|
|
|
spin_lock_irqsave(&sync_file_list_lock, flags);
|
|
list_for_each(pos, &sync_file_list_head) {
|
|
struct sync_file *sync_file =
|
|
container_of(pos, struct sync_file, sync_file_list);
|
|
|
|
sync_print_sync_file(s, sync_file);
|
|
seq_puts(s, "\n");
|
|
}
|
|
spin_unlock_irqrestore(&sync_file_list_lock, flags);
|
|
return 0;
|
|
}
|
|
|
|
/* debugfs open: wire sync_debugfs_show up as the seq_file show routine. */
static int sync_info_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}
|
|
|
|
/* File operations for the read-only debugfs "info" file. */
static const struct file_operations sync_info_debugfs_fops = {
	.open           = sync_info_debugfs_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};
|
|
|
|
/*
 * Create the debugfs "sync" directory with its "info" (read-only dump)
 * and "sw_sync" (software timeline control) entries.
 */
static __init int sync_debugfs_init(void)
{
	dbgfs = debugfs_create_dir("sync", NULL);

	/*
	 * The debugfs files won't ever get removed and thus, there is
	 * no need to protect it against removal races. The use of
	 * debugfs_create_file_unsafe() is actually safe here.
	 */
	debugfs_create_file_unsafe("info", 0444, dbgfs, NULL,
				   &sync_info_debugfs_fops);
	debugfs_create_file_unsafe("sw_sync", 0644, dbgfs, NULL,
				   &sw_sync_debugfs_fops);

	return 0;
}
late_initcall(sync_debugfs_init);
|
|
|
|
#define DUMP_CHUNK 256
|
|
static char sync_dump_buf[64 * 1024];
|
|
void sync_dump(void)
|
|
{
|
|
struct seq_file s = {
|
|
.buf = sync_dump_buf,
|
|
.size = sizeof(sync_dump_buf) - 1,
|
|
};
|
|
int i;
|
|
|
|
sync_debugfs_show(&s, NULL);
|
|
|
|
for (i = 0; i < s.count; i += DUMP_CHUNK) {
|
|
if ((s.count - i) > DUMP_CHUNK) {
|
|
char c = s.buf[i + DUMP_CHUNK];
|
|
|
|
s.buf[i + DUMP_CHUNK] = 0;
|
|
pr_cont("%s", s.buf + i);
|
|
s.buf[i + DUMP_CHUNK] = c;
|
|
} else {
|
|
s.buf[s.count] = 0;
|
|
pr_cont("%s", s.buf + i);
|
|
}
|
|
}
|
|
}
|