Merge branch 'tracing-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'tracing-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (413 commits)
  tracing, net: fix net tree and tracing tree merge interaction
  tracing, powerpc: fix powerpc tree and tracing tree interaction
  ring-buffer: do not remove reader page from list on ring buffer free
  function-graph: allow unregistering twice
  trace: make argument 'mem' of trace_seq_putmem() const
  tracing: add missing 'extern' keywords to trace_output.h
  tracing: provide trace_seq_reserve()
  blktrace: print out BLK_TN_MESSAGE properly
  blktrace: extract duplicate code
  blktrace: fix memory leak when freeing struct blk_io_trace
  blktrace: fix blk_probes_ref chaos
  blktrace: make classic output more classic
  blktrace: fix off-by-one bug
  blktrace: fix the original blktrace
  blktrace: fix a race when creating blk_tree_root in debugfs
  blktrace: fix timestamp in binary output
  tracing, Text Edit Lock: cleanup
  tracing: filter fix for TRACE_EVENT_FORMAT events
  ftrace: Using FTRACE_WARN_ON() to check "freed record" in ftrace_release()
  x86: kretprobe-booster interrupt emulation code fix
  ...

Fix up trivial conflicts in arch/parisc/include/asm/ftrace.h, include/linux/memory.h, kernel/extable.c and kernel/module.c
@@ -9,6 +9,9 @@ config USER_STACKTRACE_SUPPORT
config NOP_TRACER
	bool

config HAVE_FTRACE_NMI_ENTER
	bool

config HAVE_FUNCTION_TRACER
	bool

@@ -31,12 +34,20 @@ config HAVE_FTRACE_MCOUNT_RECORD
config HAVE_HW_BRANCH_TRACER
	bool

config HAVE_FTRACE_SYSCALLS
	bool

config TRACER_MAX_TRACE
	bool

config RING_BUFFER
	bool

config FTRACE_NMI_ENTER
	bool
	depends on HAVE_FTRACE_NMI_ENTER
	default y

config TRACING
	bool
	select DEBUG_FS
@@ -44,13 +55,29 @@ config TRACING
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	# PPC32 has no irqflags tracing support, but it can use most of the
	# tracers anyway, they were tested to build and work. Note that new
	# exceptions to this list aren't welcomed, better implement the
	# irqflags tracing for your architecture.
	depends on TRACE_IRQFLAGS_SUPPORT || PPC32
	depends on STACKTRACE_SUPPORT
	default y

if TRACING_SUPPORT

menu "Tracers"

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	depends on DEBUG_KERNEL
	select FRAME_POINTER
	select KALLSYMS
	select TRACING
@@ -82,7 +109,6 @@ config IRQSOFF_TRACER
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on GENERIC_TIME
	depends on DEBUG_KERNEL
	select TRACE_IRQFLAGS
	select TRACING
	select TRACER_MAX_TRACE
@@ -105,7 +131,6 @@ config PREEMPT_TRACER
	default n
	depends on GENERIC_TIME
	depends on PREEMPT
	depends on DEBUG_KERNEL
	select TRACING
	select TRACER_MAX_TRACE
	help
@@ -126,13 +151,13 @@ config SYSPROF_TRACER
	bool "Sysprof Tracer"
	depends on X86
	select TRACING
	select CONTEXT_SWITCH_TRACER
	help
	  This tracer provides the trace needed by the 'Sysprof' userspace
	  tool.

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	depends on DEBUG_KERNEL
	select TRACING
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
@@ -142,16 +167,30 @@ config SCHED_TRACER

config CONTEXT_SWITCH_TRACER
	bool "Trace process context switches"
	depends on DEBUG_KERNEL
	select TRACING
	select MARKERS
	help
	  This tracer gets called from the context switch and records
	  all switching of tasks.

config EVENT_TRACER
	bool "Trace various events in the kernel"
	select TRACING
	help
	  This tracer hooks to various trace points in the kernel
	  allowing the user to pick and choose which trace point they
	  want to trace.

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_FTRACE_SYSCALLS
	select TRACING
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.

config BOOT_TRACER
	bool "Trace boot initcalls"
	depends on DEBUG_KERNEL
	select TRACING
	select CONTEXT_SWITCH_TRACER
	help
@@ -164,13 +203,11 @@ config BOOT_TRACER
	  representation of the delays during initcalls - but the raw
	  /debug/tracing/trace text output is readable too.

	  ( Note that tracing self tests can't be enabled if this tracer is
	  selected, because the self-tests are an initcall as well and that
	  would invalidate the boot trace. )
	  You must pass in ftrace=initcall to the kernel command line
	  to enable this on bootup.

config TRACE_BRANCH_PROFILING
	bool "Trace likely/unlikely profiler"
	depends on DEBUG_KERNEL
	select TRACING
	help
	  This tracer profiles all the likely and unlikely macros
@@ -223,7 +260,6 @@ config BRANCH_TRACER

config POWER_TRACER
	bool "Trace power consumption behavior"
	depends on DEBUG_KERNEL
	depends on X86
	select TRACING
	help
@@ -235,7 +271,6 @@ config POWER_TRACER
config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	depends on DEBUG_KERNEL
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
@@ -265,11 +300,66 @@ config HW_BRANCH_TRACER
	  This tracer records all branches on the system in a circular
	  buffer giving access to the last N branches for each cpu.

config KMEMTRACE
	bool "Trace SLAB allocations"
	select TRACING
	help
	  kmemtrace provides tracing for slab allocator functions, such as
	  kmalloc, kfree, kmem_cache_alloc, kmem_cache_free etc. Collected
	  data is then fed to the userspace application in order to analyse
	  allocation hotspots, internal fragmentation and so on, making it
	  possible to see how well an allocator performs, as well as debug
	  and profile kernel code.

	  This requires a userspace application to use. See
	  Documentation/vm/kmemtrace.txt for more information.

	  Saying Y will make the kernel somewhat larger and slower. However,
	  if you disable kmemtrace at run-time or boot-time, the performance
	  impact is minimal (depending on the arch the kernel is built for).

	  If unsure, say N.

config WORKQUEUE_TRACER
	bool "Trace workqueues"
	select TRACING
	help
	  The workqueue tracer provides some statistical information
	  about each cpu workqueue thread such as the number of the
	  works inserted and executed since their creation. It can help
	  to evaluate the amount of work each of them has to perform.
	  For example it can help a developer to decide whether he should
	  choose a per cpu workqueue instead of a singlethreaded one.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block io actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select TRACING
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	  git://git.kernel.dk/blktrace.git

	  Tracing also is possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace_pipe

	  If unsure, say N.

config DYNAMIC_FTRACE
	bool "enable/disable ftrace tracepoints dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	depends on DEBUG_KERNEL
	default y
	help
	  This option will modify all the calls to ftrace dynamically
@@ -295,7 +385,7 @@ config FTRACE_SELFTEST

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on TRACING && DEBUG_KERNEL && !BOOT_TRACER
	depends on TRACING
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup
@@ -305,7 +395,7 @@ config FTRACE_STARTUP_TEST

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && DEBUG_KERNEL && PCI
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select TRACING
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
@@ -327,3 +417,6 @@ config MMIOTRACE_TEST
	  Say N, unless you absolutely know what you are doing.

endmenu

endif # TRACING_SUPPORT
@@ -19,6 +19,10 @@ obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
obj-$(CONFIG_RING_BUFFER) += ring_buffer.o

obj-$(CONFIG_TRACING) += trace.o
obj-$(CONFIG_TRACING) += trace_clock.o
obj-$(CONFIG_TRACING) += trace_output.o
obj-$(CONFIG_TRACING) += trace_stat.o
obj-$(CONFIG_TRACING) += trace_printk.o
obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o
obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
@@ -33,5 +37,14 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o
obj-$(CONFIG_POWER_TRACER) += trace_power.o
obj-$(CONFIG_KMEMTRACE) += kmemtrace.o
obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o
obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
obj-$(CONFIG_EVENT_TRACER) += trace_events.o
obj-$(CONFIG_EVENT_TRACER) += events.o
obj-$(CONFIG_EVENT_TRACER) += trace_export.o
obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o
obj-$(CONFIG_EVENT_TRACER) += trace_events_filter.o

libftrace-y := ftrace.o
kernel/trace/blktrace.c (new file, 1549 lines; diff suppressed because it is too large)

kernel/trace/events.c (new file, 14 lines)
@@ -0,0 +1,14 @@
/*
 * This is the place to register all trace points as events.
 */

#include <linux/stringify.h>

#include <trace/trace_events.h>

#include "trace_output.h"

#include "trace_events_stage_1.h"
#include "trace_events_stage_2.h"
#include "trace_events_stage_3.h"
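The three stage headers rely on re-expanding one list of definitions several times, each pass with different macro bodies. As a rough illustration of that technique only (the real trace_events_stage_*.h files are more involved, and all names below are invented), a self-contained X-macro sketch:

#include <stdio.h>

/* One list of events, written once; each stage re-expands it. */
#define EVENT_LIST(E)	\
	E(sched_switch)	\
	E(irq_handler_entry)

/* Stage 1: expand the list into enum identifiers. */
#define AS_ENUM(name) EVENT_ID_##name,
enum event_id { EVENT_LIST(AS_ENUM) EVENT_ID_MAX };

/* Stage 2: expand the same list into name strings. */
#define AS_NAME(name) #name,
static const char *event_names[] = { EVENT_LIST(AS_NAME) };

int main(void)
{
	int i;

	for (i = 0; i < EVENT_ID_MAX; i++)
		printf("%d: %s\n", i, event_names[i]);
	return 0;
}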
kernel/trace/kmemtrace.c (new file, 339 lines)
@@ -0,0 +1,339 @@
/*
 * Memory allocator tracing
 *
 * Copyright (C) 2008 Eduard - Gabriel Munteanu
 * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/dcache.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <trace/kmemtrace.h>

#include "trace.h"
#include "trace_output.h"

/* Select an alternative, minimalistic output than the original one */
#define TRACE_KMEM_OPT_MINIMAL 0x1

static struct tracer_opt kmem_opts[] = {
	/* Default disable the minimalistic output */
	{ TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
	{ }
};

static struct tracer_flags kmem_tracer_flags = {
	.val = 0,
	.opts = kmem_opts
};

static bool kmem_tracing_enabled __read_mostly;
static struct trace_array *kmemtrace_array;

static int kmem_trace_init(struct trace_array *tr)
{
	int cpu;
	kmemtrace_array = tr;

	for_each_cpu_mask(cpu, cpu_possible_map)
		tracing_reset(tr, cpu);

	kmem_tracing_enabled = true;

	return 0;
}

static void kmem_trace_reset(struct trace_array *tr)
{
	kmem_tracing_enabled = false;
}

static void kmemtrace_headers(struct seq_file *s)
{
	/* Don't need headers for the original kmemtrace output */
	if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
		return;

	seq_printf(s, "#\n");
	seq_printf(s, "# ALLOC TYPE REQ GIVEN FLAGS "
			" POINTER NODE CALLER\n");
	seq_printf(s, "# FREE | | | | "
			" | | | |\n");
	seq_printf(s, "# |\n\n");
}

/*
 * The two following functions give the original output from kmemtrace,
 * or something close to it... perhaps they need some missing things.
 */
static enum print_line_t
kmemtrace_print_alloc_original(struct trace_iterator *iter,
			       struct kmemtrace_alloc_entry *entry)
{
	struct trace_seq *s = &iter->seq;
	int ret;

	/* Taken from the old linux/kmemtrace.h */
	ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu "
		"bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n",
		entry->type_id, entry->call_site, (unsigned long) entry->ptr,
		(unsigned long) entry->bytes_req, (unsigned long) entry->bytes_alloc,
		(unsigned long) entry->gfp_flags, entry->node);

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free_original(struct trace_iterator *iter,
			      struct kmemtrace_free_entry *entry)
{
	struct trace_seq *s = &iter->seq;
	int ret;

	/* Taken from the old linux/kmemtrace.h */
	ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu\n",
		entry->type_id, entry->call_site, (unsigned long) entry->ptr);

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/* The two other following provide a more minimalistic output */
static enum print_line_t
kmemtrace_print_alloc_compress(struct trace_iterator *iter,
			       struct kmemtrace_alloc_entry *entry)
{
	struct trace_seq *s = &iter->seq;
	int ret;

	/* Alloc entry */
	ret = trace_seq_printf(s, " + ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Type */
	switch (entry->type_id) {
	case KMEMTRACE_TYPE_KMALLOC:
		ret = trace_seq_printf(s, "K ");
		break;
	case KMEMTRACE_TYPE_CACHE:
		ret = trace_seq_printf(s, "C ");
		break;
	case KMEMTRACE_TYPE_PAGES:
		ret = trace_seq_printf(s, "P ");
		break;
	default:
		ret = trace_seq_printf(s, "? ");
	}

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Requested */
	ret = trace_seq_printf(s, "%4zu ", entry->bytes_req);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Allocated */
	ret = trace_seq_printf(s, "%4zu ", entry->bytes_alloc);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * Flags
	 * TODO: would be better to see the name of the GFP flag names
	 */
	ret = trace_seq_printf(s, "%08x ", entry->gfp_flags);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Pointer to allocated */
	ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Node */
	ret = trace_seq_printf(s, "%4d ", entry->node);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Call site */
	ret = seq_print_ip_sym(s, entry->call_site, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (!trace_seq_printf(s, "\n"))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free_compress(struct trace_iterator *iter,
			      struct kmemtrace_free_entry *entry)
{
	struct trace_seq *s = &iter->seq;
	int ret;

	/* Free entry */
	ret = trace_seq_printf(s, " - ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Type */
	switch (entry->type_id) {
	case KMEMTRACE_TYPE_KMALLOC:
		ret = trace_seq_printf(s, "K ");
		break;
	case KMEMTRACE_TYPE_CACHE:
		ret = trace_seq_printf(s, "C ");
		break;
	case KMEMTRACE_TYPE_PAGES:
		ret = trace_seq_printf(s, "P ");
		break;
	default:
		ret = trace_seq_printf(s, "? ");
	}

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Skip requested/allocated/flags */
	ret = trace_seq_printf(s, " ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Pointer to allocated */
	ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Skip node */
	ret = trace_seq_printf(s, " ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Call site */
	ret = seq_print_ip_sym(s, entry->call_site, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (!trace_seq_printf(s, "\n"))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;

	switch (entry->type) {
	case TRACE_KMEM_ALLOC: {
		struct kmemtrace_alloc_entry *field;
		trace_assign_type(field, entry);
		if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
			return kmemtrace_print_alloc_compress(iter, field);
		else
			return kmemtrace_print_alloc_original(iter, field);
	}

	case TRACE_KMEM_FREE: {
		struct kmemtrace_free_entry *field;
		trace_assign_type(field, entry);
		if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
			return kmemtrace_print_free_compress(iter, field);
		else
			return kmemtrace_print_free_original(iter, field);
	}

	default:
		return TRACE_TYPE_UNHANDLED;
	}
}

/* Trace allocations */
void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
			       unsigned long call_site,
			       const void *ptr,
			       size_t bytes_req,
			       size_t bytes_alloc,
			       gfp_t gfp_flags,
			       int node)
{
	struct ring_buffer_event *event;
	struct kmemtrace_alloc_entry *entry;
	struct trace_array *tr = kmemtrace_array;

	if (!kmem_tracing_enabled)
		return;

	event = trace_buffer_lock_reserve(tr, TRACE_KMEM_ALLOC,
					  sizeof(*entry), 0, 0);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);

	entry->call_site = call_site;
	entry->ptr = ptr;
	entry->bytes_req = bytes_req;
	entry->bytes_alloc = bytes_alloc;
	entry->gfp_flags = gfp_flags;
	entry->node = node;

	trace_buffer_unlock_commit(tr, event, 0, 0);
}
EXPORT_SYMBOL(kmemtrace_mark_alloc_node);

void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
			 unsigned long call_site,
			 const void *ptr)
{
	struct ring_buffer_event *event;
	struct kmemtrace_free_entry *entry;
	struct trace_array *tr = kmemtrace_array;

	if (!kmem_tracing_enabled)
		return;

	event = trace_buffer_lock_reserve(tr, TRACE_KMEM_FREE,
					  sizeof(*entry), 0, 0);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->type_id = type_id;
	entry->call_site = call_site;
	entry->ptr = ptr;

	trace_buffer_unlock_commit(tr, event, 0, 0);
}
EXPORT_SYMBOL(kmemtrace_mark_free);

static struct tracer kmem_tracer __read_mostly = {
	.name = "kmemtrace",
	.init = kmem_trace_init,
	.reset = kmem_trace_reset,
	.print_line = kmemtrace_print_line,
	.print_header = kmemtrace_headers,
	.flags = &kmem_tracer_flags
};

void kmemtrace_init(void)
{
	/* earliest opportunity to start kmem tracing */
}

static int __init init_kmem_tracer(void)
{
	return register_tracer(&kmem_tracer);
}

device_initcall(init_kmem_tracer);
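The two exported kmemtrace_mark_* hooks above are meant to be called from allocator code. A hedged sketch of a hypothetical call site (the wrapper names are invented; it assumes kernel context where kmalloc_node(), ksize() and _RET_IP_ are available, and uses the signatures defined above):

/* Hypothetical allocator wrappers feeding kmemtrace; for illustration only. */
static inline void *kmalloc_node_traced(size_t size, gfp_t flags, int node)
{
	void *ret = kmalloc_node(size, flags, node);

	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
				  _RET_IP_,    /* call site recorded in the trace */
				  ret,         /* pointer handed back to the caller */
				  size,        /* bytes requested */
				  ksize(ret),  /* bytes actually allocated */
				  flags, node);
	return ret;
}

static inline void kfree_traced(const void *ptr)
{
	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ptr);
	kfree(ptr);
}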
kernel/trace/trace.c (3060 lines changed; diff suppressed because it is too large)
@@ -9,6 +9,8 @@
#include <linux/mmiotrace.h>
#include <linux/ftrace.h>
#include <trace/boot.h>
#include <trace/kmemtrace.h>
#include <trace/power.h>

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

@@ -16,9 +18,9 @@ enum trace_type {
	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_CONT,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_SPECIAL,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
@@ -29,9 +31,14 @@ enum trace_type {
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_HW_BRANCHES,
	TRACE_SYSCALL_ENTER,
	TRACE_SYSCALL_EXIT,
	TRACE_KMEM_ALLOC,
	TRACE_KMEM_FREE,
	TRACE_POWER,
	TRACE_BLK,

	__TRACE_LAST_TYPE
	__TRACE_LAST_TYPE,
};

/*
@@ -42,7 +49,6 @@ enum trace_type {
 */
struct trace_entry {
	unsigned char type;
	unsigned char cpu;
	unsigned char flags;
	unsigned char preempt_count;
	int pid;
@@ -60,13 +66,13 @@ struct ftrace_entry {

/* Function call entry */
struct ftrace_graph_ent_entry {
	struct trace_entry ent;
	struct trace_entry ent;
	struct ftrace_graph_ent graph_ent;
};

/* Function return entry */
struct ftrace_graph_ret_entry {
	struct trace_entry ent;
	struct trace_entry ent;
	struct ftrace_graph_ret ret;
};
extern struct tracer boot_tracer;
@@ -112,12 +118,18 @@ struct userstack_entry {
};

/*
 * ftrace_printk entry:
 * trace_printk entry:
 */
struct bprint_entry {
	struct trace_entry ent;
	unsigned long ip;
	const char *fmt;
	u32 buf[];
};

struct print_entry {
	struct trace_entry ent;
	unsigned long ip;
	int depth;
	char buf[];
};

@@ -170,15 +182,45 @@ struct trace_power {
	struct power_trace state_data;
};

struct kmemtrace_alloc_entry {
	struct trace_entry ent;
	enum kmemtrace_type_id type_id;
	unsigned long call_site;
	const void *ptr;
	size_t bytes_req;
	size_t bytes_alloc;
	gfp_t gfp_flags;
	int node;
};

struct kmemtrace_free_entry {
	struct trace_entry ent;
	enum kmemtrace_type_id type_id;
	unsigned long call_site;
	const void *ptr;
};

struct syscall_trace_enter {
	struct trace_entry ent;
	int nr;
	unsigned long args[];
};

struct syscall_trace_exit {
	struct trace_entry ent;
	int nr;
	unsigned long ret;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF       - interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED   - reschedule is requested
 *  HARDIRQ        - inside an interrupt handler
 *  SOFTIRQ        - inside a softirq handler
 *  CONT           - multiple entries hold the trace item
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF = 0x01,
@@ -186,7 +228,6 @@ enum trace_flag_type {
	TRACE_FLAG_NEED_RESCHED = 0x04,
	TRACE_FLAG_HARDIRQ = 0x08,
	TRACE_FLAG_SOFTIRQ = 0x10,
	TRACE_FLAG_CONT = 0x20,
};

#define TRACE_BUF_SIZE 1024
@@ -198,6 +239,7 @@ enum trace_flag_type {
 */
struct trace_array_cpu {
	atomic_t disabled;
	void *buffer_page;	/* ring buffer spare */

	/* these fields get copied into max-trace: */
	unsigned long trace_idx;
@@ -262,10 +304,10 @@ extern void __ftrace_bad_type(void);
	do { \
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \
		IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
		IF_ASSIGN(var, ent, struct special_entry, 0); \
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
			  TRACE_MMIO_RW); \
@@ -279,7 +321,15 @@ extern void __ftrace_bad_type(void);
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
			  TRACE_GRAPH_RET); \
		IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
		IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \
		IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \
		IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry, \
			  TRACE_KMEM_ALLOC); \
		IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \
			  TRACE_KMEM_FREE); \
		IF_ASSIGN(var, ent, struct syscall_trace_enter, \
			  TRACE_SYSCALL_ENTER); \
		IF_ASSIGN(var, ent, struct syscall_trace_exit, \
			  TRACE_SYSCALL_EXIT); \
		__ftrace_bad_type(); \
	} while (0)

@@ -287,7 +337,8 @@ extern void __ftrace_bad_type(void);
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE = 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED = 1,
	TRACE_TYPE_UNHANDLED = 2	/* Relay to other output functions */
	TRACE_TYPE_UNHANDLED = 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME = 3	/* Handled but ask to not consume */
};

@@ -297,8 +348,8 @@ enum print_line_t {
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char *name;	/* Will appear on the trace_options file */
	u32 bit;		/* Mask assigned in val field in tracer_flags */
	const char *name;	/* Will appear on the trace_options file */
	u32 bit;		/* Mask assigned in val field in tracer_flags */
};

/*
@@ -307,28 +358,51 @@ struct tracer_opt {
 */
struct tracer_flags {
	u32 val;
	struct tracer_opt *opts;
	struct tracer_opt *opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b) .name = #s, .bit = b

/*
 * A specific tracer, represented by methods that operate on a trace array:

/**
 * struct tracer - a specific tracer and its callbacks to interact with debugfs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @wait_pipe: override how the user waits for traces on trace_pipe
 * @close: called when the trace file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_headers: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char *name;
	/* Your tracer should raise a warning if init fails */
	int (*init)(struct trace_array *tr);
	void (*reset)(struct trace_array *tr);
	void (*start)(struct trace_array *tr);
	void (*stop)(struct trace_array *tr);
	void (*open)(struct trace_iterator *iter);
	void (*pipe_open)(struct trace_iterator *iter);
	void (*wait_pipe)(struct trace_iterator *iter);
	void (*close)(struct trace_iterator *iter);
	ssize_t (*read)(struct trace_iterator *iter,
			struct file *filp, char __user *ubuf,
			size_t cnt, loff_t *ppos);
	ssize_t (*splice_read)(struct trace_iterator *iter,
			       struct file *filp,
			       loff_t *ppos,
			       struct pipe_inode_info *pipe,
			       size_t len,
			       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int (*selftest)(struct tracer *trace,
			struct trace_array *tr);
@@ -339,7 +413,8 @@ struct tracer {
	int (*set_flag)(u32 old_flags, u32 bit, int set);
	struct tracer *next;
	int print_max;
	struct tracer_flags *flags;
	struct tracer_flags *flags;
	struct tracer_stat *stats;
};
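For orientation, a minimal sketch of a tracer plugin built on the callbacks documented above; the "mini" tracer and its functions are hypothetical, but register_tracer() and the struct layout are taken from this file and the kmemtrace code in this merge:

/* Minimal sketch of a tracer plugin, assuming the struct tracer above. */
static int mini_trace_init(struct trace_array *tr)
{
	/* reset per-cpu buffers, remember tr, enable hooks... */
	return 0;
}

static void mini_trace_reset(struct trace_array *tr)
{
	/* tear down whatever init set up */
}

static struct tracer mini_tracer __read_mostly = {
	.name	= "mini",
	.init	= mini_trace_init,
	.reset	= mini_trace_reset,
};

static int __init init_mini_tracer(void)
{
	/* makes it selectable via available_tracers/current_tracer */
	return register_tracer(&mini_tracer);
}
device_initcall(init_mini_tracer);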

struct trace_seq {
@@ -348,6 +423,16 @@ struct trace_seq {
	unsigned int readpos;
};

static inline void
trace_seq_init(struct trace_seq *s)
{
	s->len = 0;
	s->readpos = 0;
}
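A usage sketch of the trace_seq pattern: output callbacks build their line in the iterator's trace_seq and return TRACE_TYPE_PARTIAL_LINE when it fills up, so the core can flush and retry. The callback name is invented; the types and trace_seq_printf() are declared in this header:

/* Sketch of a print_line-style callback using trace_seq. */
static enum print_line_t example_print_line(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (!trace_seq_printf(s, "pid=%d\n", iter->ent->pid))
		return TRACE_TYPE_PARTIAL_LINE;	/* seq full, retry after flush */

	return TRACE_TYPE_HANDLED;
}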

#define TRACE_PIPE_ALL_CPU -1

/*
 * Trace iterator - used by printout routines who present trace
 * results to users and which routines might sleep, etc:
@@ -356,6 +441,8 @@ struct trace_iterator {
	struct trace_array *tr;
	struct tracer *trace;
	void *private;
	int cpu_file;
	struct mutex mutex;
	struct ring_buffer_iter *buffer_iter[NR_CPUS];

	/* The below is zeroed out in pipe_read */
@@ -371,6 +458,7 @@ struct trace_iterator {
	cpumask_var_t started;
};

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
@@ -379,26 +467,50 @@ int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);

struct ring_buffer_event;

struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
						    unsigned char type,
						    unsigned long len,
						    unsigned long flags,
						    int pc);
void trace_buffer_unlock_commit(struct trace_array *tr,
				struct ring_buffer_event *event,
				unsigned long flags, int pc);

struct ring_buffer_event *
trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
				  unsigned long flags, int pc);
void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
					unsigned long flags, int pc);
void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
				       unsigned long flags, int pc);
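These helpers centralize the reserve/fill/commit idiom that the converted tracers in this merge (kmemtrace, boot, branch) all follow. A sketch of the pattern; struct example_entry and TRACE_EXAMPLE are hypothetical stand-ins for a real entry type:

/* Sketch of the reserve/commit pattern, assuming the declarations above. */
static void trace_example_event(struct trace_array *tr, unsigned long data)
{
	struct ring_buffer_event *event;
	struct example_entry *entry;

	event = trace_buffer_lock_reserve(tr, TRACE_EXAMPLE,
					  sizeof(*entry), 0, 0);
	if (!event)
		return;		/* ring buffer full or disabled */
	entry = ring_buffer_event_data(event);
	entry->data = data;	/* fill the payload in place */

	trace_buffer_unlock_commit(tr, event, 0, 0);
}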

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);

void default_wait_pipe(struct trace_iterator *iter);
void poll_wait_pipe(struct trace_iterator *iter);

void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);
void tracing_record_cmdline(struct task_struct *tsk);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
@@ -408,14 +520,12 @@ void trace_special(struct trace_array *tr,
		   unsigned long arg2,
		   unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
		    struct trace_array_cpu *data,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void trace_hw_branch(struct trace_array *tr, u64 from, u64 to);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
@@ -434,15 +544,11 @@ void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

extern cycle_t ftrace_now(int cpu);
void __trace_stack(struct trace_array *tr,
		   unsigned long flags,
		   int skip, int pc);

#ifdef CONFIG_FUNCTION_TRACER
void tracing_start_function_trace(void);
void tracing_stop_function_trace(void);
#else
# define tracing_start_function_trace() do { } while (0)
# define tracing_stop_function_trace() do { } while (0)
#endif
extern cycle_t ftrace_now(int cpu);

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
@@ -456,10 +562,10 @@ struct tracer_switch_ops {
	void *private;
	struct tracer_switch_ops *next;
};

char *trace_find_cmdline(int pid);
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

extern void trace_find_cmdline(int pid, char comm[]);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
@@ -469,6 +575,8 @@ extern int DYN_FTRACE_TEST_NAME(void);
#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
@@ -488,18 +596,11 @@ extern int trace_selftest_startup_branch(struct tracer *trace,
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
extern void trace_seq_print_cont(struct trace_seq *s,
				 struct trace_iterator *iter);

extern int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
		 unsigned long sym_flags);
extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
				 size_t cnt);
extern long ns2usecs(cycle_t nsec);
extern int
trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args);
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);

extern unsigned long trace_flags;

@@ -580,7 +681,11 @@ enum trace_iterator_flags {
	TRACE_ITER_ANNOTATE = 0x2000,
	TRACE_ITER_USERSTACKTRACE = 0x4000,
	TRACE_ITER_SYM_USEROBJ = 0x8000,
	TRACE_ITER_PRINTK_MSGONLY = 0x10000
	TRACE_ITER_PRINTK_MSGONLY = 0x10000,
	TRACE_ITER_CONTEXT_INFO = 0x20000, /* Print pid/cpu/time */
	TRACE_ITER_LATENCY_FMT = 0x40000,
	TRACE_ITER_GLOBAL_CLK = 0x80000,
	TRACE_ITER_SLEEP_TIME = 0x100000,
};

/*
@@ -601,12 +706,12 @@ extern struct tracer nop_trace;
 * preempt_enable (after a disable), a schedule might take place
 * causing an infinite recursion.
 *
 * To prevent this, we read the need_recshed flag before
 * To prevent this, we read the need_resched flag before
 * disabling preemption. When we want to enable preemption we
 * check the flag, if it is set, then we call preempt_enable_no_resched.
 * Otherwise, we call preempt_enable.
 *
 * The rationale for doing the above is that if need_resched is set
 * The rationale for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
@@ -627,7 +732,7 @@ static inline int ftrace_preempt_disable(void)
 *
 * This is a scheduler safe way to enable preemption and not miss
 * any preemption checks. The disabled saved the state of preemption.
 * If resched is set, then we were either inside an atomic or
 * If resched is set, then we are either inside an atomic or
 * are inside the scheduler (we would have already scheduled
 * otherwise). In this case, we do not want to call normal
 * preempt_enable, but preempt_enable_no_resched instead.
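A usage sketch of the pair described above, a scheduler-safe preempt-disabled region inside a tracer callback. The hook name is invented; ftrace_preempt_enable() taking the saved resched flag is assumed from the comment's description:

static void my_tracer_hook(void)
{
	int resched;

	resched = ftrace_preempt_disable();

	/* ... record the trace event; no scheduling can happen here ... */

	ftrace_preempt_enable(resched);	/* avoids recursing into schedule() */
}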
@@ -664,4 +769,118 @@ static inline void trace_branch_disable(void)
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

/* trace event type bit fields, not numeric */
enum {
	TRACE_EVENT_TYPE_PRINTF = 1,
	TRACE_EVENT_TYPE_RAW = 2,
};

struct ftrace_event_field {
	struct list_head link;
	char *name;
	char *type;
	int offset;
	int size;
};

struct ftrace_event_call {
	char *name;
	char *system;
	struct dentry *dir;
	int enabled;
	int (*regfunc)(void);
	void (*unregfunc)(void);
	int id;
	int (*raw_init)(void);
	int (*show_format)(struct trace_seq *s);
	int (*define_fields)(void);
	struct list_head fields;
	struct filter_pred **preds;

#ifdef CONFIG_EVENT_PROFILE
	atomic_t profile_count;
	int (*profile_enable)(struct ftrace_event_call *);
	void (*profile_disable)(struct ftrace_event_call *);
#endif
};

struct event_subsystem {
	struct list_head list;
	const char *name;
	struct dentry *entry;
	struct filter_pred **preds;
};

#define events_for_each(event) \
	for (event = __start_ftrace_events; \
	     (unsigned long)event < (unsigned long)__stop_ftrace_events; \
	     event++)

#define MAX_FILTER_PRED 8

struct filter_pred;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

struct filter_pred {
	filter_pred_fn_t fn;
	u64 val;
	char *str_val;
	int str_len;
	char *field_name;
	int offset;
	int not;
	int or;
	int compound;
	int clear;
};

int trace_define_field(struct ftrace_event_call *call, char *type,
		       char *name, int offset, int size);
extern void filter_free_pred(struct filter_pred *pred);
extern void filter_print_preds(struct filter_pred **preds,
			       struct trace_seq *s);
extern int filter_parse(char **pbuf, struct filter_pred *pred);
extern int filter_add_pred(struct ftrace_event_call *call,
			   struct filter_pred *pred);
extern void filter_free_preds(struct ftrace_event_call *call);
extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
extern void filter_free_subsystem_preds(struct event_subsystem *system);
extern int filter_add_subsystem_pred(struct event_subsystem *system,
				     struct filter_pred *pred);
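A hedged sketch of how an event's define_fields callback might describe its record layout to the filter code via trace_define_field(); the event, entry struct, and field names below are illustrative only, while the signature matches the declaration above:

/* Hypothetical define_fields callback; offsets/sizes come from the record. */
static int example_define_fields(void)
{
	struct ftrace_event_call *call = &example_event_call;

	trace_define_field(call, "unsigned long", "ip",
			   offsetof(struct example_entry, ip),
			   sizeof(unsigned long));
	trace_define_field(call, "int", "depth",
			   offsetof(struct example_entry, depth),
			   sizeof(int));
	return 0;
}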

void event_trace_printk(unsigned long ip, const char *fmt, ...);
extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

#define for_each_event(event) \
	for (event = __start_ftrace_events; \
	     (unsigned long)event < (unsigned long)__stop_ftrace_events; \
	     event++)

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to allocate the static variable to fmt if it is not a
 * constant. Even with the outer if statement optimizing out.
 */
#define event_trace_printk(ip, fmt, args...) \
do { \
	__trace_printk_check_format(fmt, ##args); \
	tracing_record_cmdline(current); \
	if (__builtin_constant_p(fmt)) { \
		static const char *trace_printk_fmt \
		__attribute__((section("__trace_printk_fmt"))) = \
			__builtin_constant_p(fmt) ? fmt : NULL; \
\
		__trace_bprintk(ip, trace_printk_fmt, ##args); \
	} else \
		__trace_printk(ip, fmt, ##args); \
} while (0)

#endif /* _LINUX_KERNEL_TRACE_H */
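A usage sketch of the macro above: with a constant format string, the string is stored once in the __trace_printk_fmt section and only a compact bprintk record goes into the ring buffer; a non-constant format falls back to __trace_printk(). The caller function is invented:

static void example(unsigned long ip, int value)
{
	/* constant fmt: takes the cheap __trace_bprintk() path */
	event_trace_printk(ip, "value=%d\n", value);
}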
@@ -11,6 +11,7 @@
#include <linux/kallsyms.h>

#include "trace.h"
#include "trace_output.h"

static struct trace_array *boot_trace;
static bool pre_initcalls_finished;
@@ -27,13 +28,13 @@ void start_boot_trace(void)

void enable_boot_trace(void)
{
	if (pre_initcalls_finished)
	if (boot_trace && pre_initcalls_finished)
		tracing_start_sched_switch_record();
}

void disable_boot_trace(void)
{
	if (pre_initcalls_finished)
	if (boot_trace && pre_initcalls_finished)
		tracing_stop_sched_switch_record();
}

@@ -42,6 +43,9 @@ static int boot_trace_init(struct trace_array *tr)
	int cpu;
	boot_trace = tr;

	if (!tr)
		return 0;

	for_each_cpu(cpu, cpu_possible_mask)
		tracing_reset(tr, cpu);

@@ -128,10 +132,9 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
{
	struct ring_buffer_event *event;
	struct trace_boot_call *entry;
	unsigned long irq_flags;
	struct trace_array *tr = boot_trace;

	if (!pre_initcalls_finished)
	if (!tr || !pre_initcalls_finished)
		return;

	/* Get its name now since this function could
@@ -140,18 +143,13 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
	sprint_symbol(bt->func, (unsigned long)fn);
	preempt_disable();

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	event = trace_buffer_lock_reserve(tr, TRACE_BOOT_CALL,
					  sizeof(*entry), 0, 0);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);
	entry->ent.type = TRACE_BOOT_CALL;
	entry->boot_call = *bt;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

	trace_wake_up();

	trace_buffer_unlock_commit(tr, event, 0, 0);
out:
	preempt_enable();
}
@@ -160,27 +158,21 @@ void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
{
	struct ring_buffer_event *event;
	struct trace_boot_ret *entry;
	unsigned long irq_flags;
	struct trace_array *tr = boot_trace;

	if (!pre_initcalls_finished)
	if (!tr || !pre_initcalls_finished)
		return;

	sprint_symbol(bt->func, (unsigned long)fn);
	preempt_disable();

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	event = trace_buffer_lock_reserve(tr, TRACE_BOOT_RET,
					  sizeof(*entry), 0, 0);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);
	entry->ent.type = TRACE_BOOT_RET;
	entry->boot_ret = *bt;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

	trace_wake_up();

	trace_buffer_unlock_commit(tr, event, 0, 0);
out:
	preempt_enable();
}
@@ -14,12 +14,17 @@
#include <linux/hash.h>
#include <linux/fs.h>
#include <asm/local.h>

#include "trace.h"
#include "trace_stat.h"
#include "trace_output.h"

#ifdef CONFIG_BRANCH_TRACER

static struct tracer branch_trace;
static int branch_tracing_enabled __read_mostly;
static DEFINE_MUTEX(branch_tracing_mutex);

static struct trace_array *branch_tracer;

static void
@@ -28,7 +33,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
	struct trace_array *tr = branch_tracer;
	struct ring_buffer_event *event;
	struct trace_branch *entry;
	unsigned long flags, irq_flags;
	unsigned long flags;
	int cpu, pc;
	const char *p;

@@ -47,15 +52,13 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
		goto out;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	pc = preempt_count();
	event = trace_buffer_lock_reserve(tr, TRACE_BRANCH,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out;

	pc = preempt_count();
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_BRANCH;

	/* Strip off the path, only save the file */
	p = f->file + strlen(f->file);
@@ -70,7 +73,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
	entry->line = f->line;
	entry->correct = val == expect;

	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ring_buffer_unlock_commit(tr->buffer, event);

out:
	atomic_dec(&tr->data[cpu]->disabled);
@@ -88,8 +91,6 @@ void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)

int enable_branch_tracing(struct trace_array *tr)
{
	int ret = 0;

	mutex_lock(&branch_tracing_mutex);
	branch_tracer = tr;
	/*
@@ -100,7 +101,7 @@ int enable_branch_tracing(struct trace_array *tr)
	branch_tracing_enabled++;
	mutex_unlock(&branch_tracing_mutex);

	return ret;
	return 0;
}

void disable_branch_tracing(void)
@@ -128,11 +129,6 @@ static void stop_branch_trace(struct trace_array *tr)

static int branch_trace_init(struct trace_array *tr)
{
	int cpu;

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);

	start_branch_trace(tr);
	return 0;
}
@@ -142,22 +138,53 @@ static void branch_trace_reset(struct trace_array *tr)
	stop_branch_trace(tr);
}

struct tracer branch_trace __read_mostly =
static enum print_line_t trace_branch_print(struct trace_iterator *iter,
					    int flags)
{
	struct trace_branch *field;

	trace_assign_type(field, iter->ent);

	if (trace_seq_printf(&iter->seq, "[%s] %s:%s:%d\n",
			     field->correct ? " ok " : " MISS ",
			     field->func,
			     field->file,
			     field->line))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static struct trace_event trace_branch_event = {
	.type = TRACE_BRANCH,
	.trace = trace_branch_print,
};

static struct tracer branch_trace __read_mostly =
{
	.name = "branch",
	.init = branch_trace_init,
	.reset = branch_trace_reset,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_branch,
#endif
#endif /* CONFIG_FTRACE_SELFTEST */
};

__init static int init_branch_trace(void)
__init static int init_branch_tracer(void)
{
	int ret;

	ret = register_ftrace_event(&trace_branch_event);
	if (!ret) {
		printk(KERN_WARNING "Warning: could not register "
		       "branch events\n");
		return 1;
	}
	return register_tracer(&branch_trace);
}
device_initcall(init_branch_tracer);

device_initcall(init_branch_trace);
#else
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
@@ -183,65 +210,38 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
}
EXPORT_SYMBOL(ftrace_likely_update);

struct ftrace_pointer {
	void *start;
	void *stop;
	int hit;
};
extern unsigned long __start_annotated_branch_profile[];
extern unsigned long __stop_annotated_branch_profile[];

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
static int annotated_branch_stat_headers(struct seq_file *m)
{
	const struct ftrace_pointer *f = m->private;
	struct ftrace_branch_data *p = v;

	(*pos)++;

	if (v == (void *)1)
		return f->start;

	++p;

	if ((void *)p >= (void *)f->stop)
		return NULL;

	return p;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	void *t = (void *)1;
	loff_t l = 0;

	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	const struct ftrace_pointer *fp = m->private;
	struct ftrace_branch_data *p = v;
	const char *f;
	long percent;

	if (v == (void *)1) {
		if (fp->hit)
			seq_printf(m, " miss hit %% ");
		else
			seq_printf(m, " correct incorrect %% ");
		seq_printf(m, " Function "
	seq_printf(m, " correct incorrect %% ");
	seq_printf(m, " Function "
		      " File Line\n"
		      " ------- --------- - "
		      " -------- "
		      " ---- ----\n");
		return 0;
	}
	return 0;
}

static inline long get_incorrect_percent(struct ftrace_branch_data *p)
{
	long percent;

	if (p->correct) {
		percent = p->incorrect * 100;
		percent /= p->correct + p->incorrect;
	} else
		percent = p->incorrect ? 100 : -1;

	return percent;
}

static int branch_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_branch_data *p = v;
	const char *f;
	long percent;

	/* Only print the file, not the path */
	f = p->file + strlen(p->file);
@@ -252,11 +252,7 @@ static int t_show(struct seq_file *m, void *v)
	/*
	 * The miss is overlayed on correct, and hit on incorrect.
	 */
	if (p->correct) {
		percent = p->incorrect * 100;
		percent /= p->correct + p->incorrect;
	} else
		percent = p->incorrect ? 100 : -1;
	percent = get_incorrect_percent(p);

	seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect);
	if (percent < 0)
@@ -267,76 +263,118 @@ static int t_show(struct seq_file *m, void *v)
	return 0;
}

static struct seq_operations tracing_likely_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
static void *annotated_branch_stat_start(void)
{
	return __start_annotated_branch_profile;
}

static void *
annotated_branch_stat_next(void *v, int idx)
{
	struct ftrace_branch_data *p = v;

	++p;

	if ((void *)p >= (void *)__stop_annotated_branch_profile)
		return NULL;

	return p;
}

static int annotated_branch_stat_cmp(void *p1, void *p2)
{
	struct ftrace_branch_data *a = p1;
	struct ftrace_branch_data *b = p2;

	long percent_a, percent_b;

	percent_a = get_incorrect_percent(a);
	percent_b = get_incorrect_percent(b);

	if (percent_a < percent_b)
		return -1;
	if (percent_a > percent_b)
		return 1;
	else
		return 0;
}

static struct tracer_stat annotated_branch_stats = {
	.name = "branch_annotated",
	.stat_start = annotated_branch_stat_start,
	.stat_next = annotated_branch_stat_next,
	.stat_cmp = annotated_branch_stat_cmp,
	.stat_headers = annotated_branch_stat_headers,
	.stat_show = branch_stat_show
};

static int tracing_branch_open(struct inode *inode, struct file *file)
__init static int init_annotated_branch_stats(void)
{
	int ret;

	ret = seq_open(file, &tracing_likely_seq_ops);
	ret = register_stat_tracer(&annotated_branch_stats);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = (void *)inode->i_private;
		printk(KERN_WARNING "Warning: could not register "
		       "annotated branches stats\n");
		return 1;
	}

	return ret;
	return 0;
}

static const struct file_operations tracing_branch_fops = {
	.open = tracing_branch_open,
	.read = seq_read,
	.llseek = seq_lseek,
};
fs_initcall(init_annotated_branch_stats);

#ifdef CONFIG_PROFILE_ALL_BRANCHES

extern unsigned long __start_branch_profile[];
extern unsigned long __stop_branch_profile[];

static const struct ftrace_pointer ftrace_branch_pos = {
	.start = __start_branch_profile,
	.stop = __stop_branch_profile,
	.hit = 1,
};

#endif /* CONFIG_PROFILE_ALL_BRANCHES */

extern unsigned long __start_annotated_branch_profile[];
extern unsigned long __stop_annotated_branch_profile[];

static const struct ftrace_pointer ftrace_annotated_branch_pos = {
	.start = __start_annotated_branch_profile,
	.stop = __stop_annotated_branch_profile,
};

static __init int ftrace_branch_init(void)
static int all_branch_stat_headers(struct seq_file *m)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("profile_annotated_branch", 0444, d_tracer,
				    (void *)&ftrace_annotated_branch_pos,
				    &tracing_branch_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'profile_annotated_branch' entry\n");

#ifdef CONFIG_PROFILE_ALL_BRANCHES
	entry = debugfs_create_file("profile_branch", 0444, d_tracer,
				    (void *)&ftrace_branch_pos,
				    &tracing_branch_fops);
	if (!entry)
		pr_warning("Could not create debugfs"
			   " 'profile_branch' entry\n");
#endif

	seq_printf(m, " miss hit %% ");
	seq_printf(m, " Function "
		      " File Line\n"
		      " ------- --------- - "
		      " -------- "
		      " ---- ----\n");
	return 0;
}

device_initcall(ftrace_branch_init);
static void *all_branch_stat_start(void)
{
	return __start_branch_profile;
}

static void *
all_branch_stat_next(void *v, int idx)
{
	struct ftrace_branch_data *p = v;

	++p;

	if ((void *)p >= (void *)__stop_branch_profile)
		return NULL;

	return p;
}

static struct tracer_stat all_branch_stats = {
	.name = "branch_all",
	.stat_start = all_branch_stat_start,
	.stat_next = all_branch_stat_next,
	.stat_headers = all_branch_stat_headers,
	.stat_show = branch_stat_show
};

__init static int all_annotated_branch_stats(void)
{
	int ret;

	ret = register_stat_tracer(&all_branch_stats);
	if (!ret) {
		printk(KERN_WARNING "Warning: could not register "
		       "all branches stats\n");
		return 1;
	}
	return 0;
}
fs_initcall(all_annotated_branch_stats);
#endif /* CONFIG_PROFILE_ALL_BRANCHES */
kernel/trace/trace_clock.c (new file, 109 lines)
@@ -0,0 +1,109 @@
/*
 * tracing clocks
 *
 * Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  - local:  CPU-local trace clock
 *  - medium: scalable global clock with some jitter
 *  - global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	unsigned long flags;
	u64 clock;

	/*
	 * sched_clock() is an architecture implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	raw_local_irq_save(flags);
	clock = sched_clock();
	raw_local_irq_restore(flags);

	return clock;
}

/*
 * trace_clock(): 'in-between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return cpu_clock(raw_smp_processor_id());
}


/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

static u64 prev_trace_clock_time;

static raw_spinlock_t trace_clock_lock ____cacheline_aligned_in_smp =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	raw_local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = cpu_clock(this_cpu);
	/*
	 * If in an NMI context then don't risk lockups and return the
	 * cpu_clock() time:
	 */
	if (unlikely(in_nmi()))
		goto out;

	__raw_spin_lock(&trace_clock_lock);

	/*
	 * TODO: if this happens often then maybe we should reset
	 * my_scd->clock to prev_trace_clock_time+1, to make sure
	 * we start ticking with the local clock from now on?
	 */
	if ((s64)(now - prev_trace_clock_time) < 0)
		now = prev_trace_clock_time + 1;

	prev_trace_clock_time = now;

	__raw_spin_unlock(&trace_clock_lock);

 out:
	raw_local_irq_restore(flags);

	return now;
}
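
The whole trick in trace_clock_global() is the clamp under the spinlock: a timestamp that would move the global clock backwards is bumped to prev_trace_clock_time + 1 instead. A minimal user-space sketch of the same idea, with a pthread mutex standing in for the raw spinlock and the caller supplying its CPU-local timestamp (nothing below is kernel API):

#include <pthread.h>
#include <stdint.h>

static uint64_t prev_time;
static pthread_mutex_t clock_lock = PTHREAD_MUTEX_INITIALIZER;

uint64_t global_clock(uint64_t local_now)
{
	pthread_mutex_lock(&clock_lock);
	/* never let the global clock move backwards */
	if ((int64_t)(local_now - prev_time) < 0)
		local_now = prev_time + 1;
	prev_time = local_now;
	pthread_mutex_unlock(&clock_lock);
	return local_now;
}

The signed subtraction handles counter wraparound the same way the kernel version does.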
kernel/trace/trace_event_profile.c (new file, 31 lines)
@@ -0,0 +1,31 @@
/*
 * trace event based perf counter profiling
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 *
 */

#include "trace.h"

int ftrace_profile_enable(int event_id)
{
	struct ftrace_event_call *event;

	for_each_event(event) {
		if (event->id == event_id)
			return event->profile_enable(event);
	}

	return -EINVAL;
}

void ftrace_profile_disable(int event_id)
{
	struct ftrace_event_call *event;

	for_each_event(event) {
		if (event->id == event_id)
			return event->profile_disable(event);
	}
}
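
ftrace_profile_enable()/ftrace_profile_disable() do a linear walk of the compiled-in event table and dispatch to the matching event's hooks; an unknown id yields -EINVAL. A hypothetical caller sketch (setup_counter() is an assumed placeholder, not part of this patch):

int attach_profile(int event_id)
{
	int err = ftrace_profile_enable(event_id);	/* -EINVAL for unknown id */
	if (err)
		return err;

	err = setup_counter(event_id);			/* hypothetical */
	if (err)
		ftrace_profile_disable(event_id);	/* undo on failure */
	return err;
}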
kernel/trace/trace_event_types.h (new file, 173 lines)
@@ -0,0 +1,173 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ftrace

/*
 * We cheat and use the proto type field as the ID
 * and args as the entry type (minus 'struct')
 */
TRACE_EVENT_FORMAT(function, TRACE_FN, ftrace_entry, ignore,
	TRACE_STRUCT(
		TRACE_FIELD(unsigned long, ip, ip)
		TRACE_FIELD(unsigned long, parent_ip, parent_ip)
	),
	TP_RAW_FMT(" %lx <-- %lx")
);

TRACE_EVENT_FORMAT(funcgraph_entry, TRACE_GRAPH_ENT,
		   ftrace_graph_ent_entry, ignore,
	TRACE_STRUCT(
		TRACE_FIELD(unsigned long, graph_ent.func, func)
		TRACE_FIELD(int, graph_ent.depth, depth)
	),
	TP_RAW_FMT("--> %lx (%d)")
);

TRACE_EVENT_FORMAT(funcgraph_exit, TRACE_GRAPH_RET,
		   ftrace_graph_ret_entry, ignore,
	TRACE_STRUCT(
		TRACE_FIELD(unsigned long, ret.func, func)
		TRACE_FIELD(int, ret.depth, depth)
	),
	TP_RAW_FMT("<-- %lx (%d)")
);

TRACE_EVENT_FORMAT(wakeup, TRACE_WAKE, ctx_switch_entry, ignore,
	TRACE_STRUCT(
		TRACE_FIELD(unsigned int, prev_pid, prev_pid)
		TRACE_FIELD(unsigned char, prev_prio, prev_prio)
		TRACE_FIELD(unsigned char, prev_state, prev_state)
		TRACE_FIELD(unsigned int, next_pid, next_pid)
		TRACE_FIELD(unsigned char, next_prio, next_prio)
		TRACE_FIELD(unsigned char, next_state, next_state)
		TRACE_FIELD(unsigned int, next_cpu, next_cpu)
	),
	TP_RAW_FMT("%u:%u:%u ==+ %u:%u:%u [%03u]")
);

TRACE_EVENT_FORMAT(context_switch, TRACE_CTX, ctx_switch_entry, ignore,
	TRACE_STRUCT(
		TRACE_FIELD(unsigned int, prev_pid, prev_pid)
		TRACE_FIELD(unsigned char, prev_prio, prev_prio)
		TRACE_FIELD(unsigned char, prev_state, prev_state)
		TRACE_FIELD(unsigned int, next_pid, next_pid)
		TRACE_FIELD(unsigned char, next_prio, next_prio)
		TRACE_FIELD(unsigned char, next_state, next_state)
		TRACE_FIELD(unsigned int, next_cpu, next_cpu)
	),
	TP_RAW_FMT("%u:%u:%u ==> %u:%u:%u [%03u]")
);

TRACE_EVENT_FORMAT(special, TRACE_SPECIAL, special_entry, ignore,
	TRACE_STRUCT(
		TRACE_FIELD(unsigned long, arg1, arg1)
		TRACE_FIELD(unsigned long, arg2, arg2)
		TRACE_FIELD(unsigned long, arg3, arg3)
	),
	TP_RAW_FMT("(%08lx) (%08lx) (%08lx)")
);

/*
 * Stack-trace entry:
 */

/* #define FTRACE_STACK_ENTRIES	8 */

TRACE_EVENT_FORMAT(kernel_stack, TRACE_STACK, stack_entry, ignore,
	TRACE_STRUCT(
		TRACE_FIELD(unsigned long, caller[0], stack0)
		TRACE_FIELD(unsigned long, caller[1], stack1)
		TRACE_FIELD(unsigned long, caller[2], stack2)
		TRACE_FIELD(unsigned long, caller[3], stack3)
		TRACE_FIELD(unsigned long, caller[4], stack4)
		TRACE_FIELD(unsigned long, caller[5], stack5)
		TRACE_FIELD(unsigned long, caller[6], stack6)
		TRACE_FIELD(unsigned long, caller[7], stack7)
	),
	TP_RAW_FMT("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n"
		   "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n")
);

TRACE_EVENT_FORMAT(user_stack, TRACE_USER_STACK, userstack_entry, ignore,
	TRACE_STRUCT(
		TRACE_FIELD(unsigned long, caller[0], stack0)
		TRACE_FIELD(unsigned long, caller[1], stack1)
		TRACE_FIELD(unsigned long, caller[2], stack2)
		TRACE_FIELD(unsigned long, caller[3], stack3)
		TRACE_FIELD(unsigned long, caller[4], stack4)
		TRACE_FIELD(unsigned long, caller[5], stack5)
		TRACE_FIELD(unsigned long, caller[6], stack6)
		TRACE_FIELD(unsigned long, caller[7], stack7)
	),
	TP_RAW_FMT("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n"
		   "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n")
);

TRACE_EVENT_FORMAT(bprint, TRACE_BPRINT, bprint_entry, ignore,
	TRACE_STRUCT(
		TRACE_FIELD(unsigned long, ip, ip)
		TRACE_FIELD(char *, fmt, fmt)
		TRACE_FIELD_ZERO_CHAR(buf)
	),
	TP_RAW_FMT("%08lx (%d) fmt:%p %s")
);

TRACE_EVENT_FORMAT(print, TRACE_PRINT, print_entry, ignore,
	TRACE_STRUCT(
		TRACE_FIELD(unsigned long, ip, ip)
		TRACE_FIELD_ZERO_CHAR(buf)
	),
	TP_RAW_FMT("%08lx (%d) fmt:%p %s")
);

TRACE_EVENT_FORMAT(branch, TRACE_BRANCH, trace_branch, ignore,
	TRACE_STRUCT(
		TRACE_FIELD(unsigned int, line, line)
		TRACE_FIELD_SPECIAL(char func[TRACE_FUNC_SIZE+1], func, func)
		TRACE_FIELD_SPECIAL(char file[TRACE_FUNC_SIZE+1], file, file)
		TRACE_FIELD(char, correct, correct)
	),
	TP_RAW_FMT("%u:%s:%s (%u)")
);

TRACE_EVENT_FORMAT(hw_branch, TRACE_HW_BRANCHES, hw_branch_entry, ignore,
	TRACE_STRUCT(
		TRACE_FIELD(u64, from, from)
		TRACE_FIELD(u64, to, to)
	),
	TP_RAW_FMT("from: %llx to: %llx")
);

TRACE_EVENT_FORMAT(power, TRACE_POWER, trace_power, ignore,
	TRACE_STRUCT(
		TRACE_FIELD(ktime_t, state_data.stamp, stamp)
		TRACE_FIELD(ktime_t, state_data.end, end)
		TRACE_FIELD(int, state_data.type, type)
		TRACE_FIELD(int, state_data.state, state)
	),
	TP_RAW_FMT("%llx->%llx type:%u state:%u")
);

TRACE_EVENT_FORMAT(kmem_alloc, TRACE_KMEM_ALLOC, kmemtrace_alloc_entry, ignore,
	TRACE_STRUCT(
		TRACE_FIELD(enum kmemtrace_type_id, type_id, type_id)
		TRACE_FIELD(unsigned long, call_site, call_site)
		TRACE_FIELD(const void *, ptr, ptr)
		TRACE_FIELD(size_t, bytes_req, bytes_req)
		TRACE_FIELD(size_t, bytes_alloc, bytes_alloc)
		TRACE_FIELD(gfp_t, gfp_flags, gfp_flags)
		TRACE_FIELD(int, node, node)
	),
	TP_RAW_FMT("type:%u call_site:%lx ptr:%p req:%lu alloc:%lu"
		   " flags:%x node:%d")
);

TRACE_EVENT_FORMAT(kmem_free, TRACE_KMEM_FREE, kmemtrace_free_entry, ignore,
	TRACE_STRUCT(
		TRACE_FIELD(enum kmemtrace_type_id, type_id, type_id)
		TRACE_FIELD(unsigned long, call_site, call_site)
		TRACE_FIELD(const void *, ptr, ptr)
	),
	TP_RAW_FMT("type:%u call_site:%lx ptr:%p")
);

#undef TRACE_SYSTEM
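
Each TRACE_EVENT_FORMAT() entry above describes a fixed record layout: the common trace_entry header followed by the declared fields, in order. A sketch of the layout the 'function' entry implies (illustrative only, not code from the patch):

struct ftrace_entry_layout {
	struct trace_entry ent;		/* type, flags, preempt_count, pid, tgid */
	unsigned long ip;		/* traced function */
	unsigned long parent_ip;	/* its caller */
};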
kernel/trace/trace_events.c (new file, 824 lines)
@@ -0,0 +1,824 @@
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

static DEFINE_MUTEX(event_mutex);

int trace_define_field(struct ftrace_event_call *call, char *type,
		       char *name, int offset, int size)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	field->offset = offset;
	field->size = size;
	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field) {
		kfree(field->name);
		kfree(field->type);
	}
	kfree(field);

	return -ENOMEM;
}

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call = (void *)__start_ftrace_events;

	while ((unsigned long)call < (unsigned long)__stop_ftrace_events) {

		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
		call++;
	}
}

static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{

	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
		break;
	case 1:
		if (!call->enabled) {
			call->enabled = 1;
			call->regfunc();
		}
		break;
	}
}

static int ftrace_set_clr_event(char *buf, int set)
{
	struct ftrace_event_call *call = __start_ftrace_events;
	char *event = NULL, *sub = NULL, *match;
	int ret = -EINVAL;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	mutex_lock(&event_mutex);
	for_each_event(call) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}
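
The strings parsed by ftrace_set_clr_event() arrive through the set_event debugfs file. A hypothetical user-space sketch, assuming debugfs is mounted at /sys/kernel/debug (prefixing the name with '!' clears the event instead):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int enable_sched_switch(void)
{
	const char *s = "sched:sched_switch";
	int fd = open("/sys/kernel/debug/tracing/set_event", O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, s, strlen(s)) < 0) {	/* "!sched:sched_switch" disables */
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}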
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	size_t read = 0;
	int i, set = 1;
	ssize_t ret;
	char *buf;
	char ch;

	if (!cnt || cnt < 0)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = get_user(ch, ubuf++);
	if (ret)
		return ret;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			return ret;
		read++;
		cnt--;
	}

	/* Only white space found? */
	if (isspace(ch)) {
		file->f_pos += read;
		ret = read;
		return ret;
	}

	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	i = 0;
	while (cnt && !isspace(ch)) {
		if (!i && ch == '!')
			set = 0;
		else
			buf[i++] = ch;

		ret = get_user(ch, ubuf++);
		if (ret)
			goto out_free;
		read++;
		cnt--;
	}
	buf[i] = 0;

	file->f_pos += read;

	ret = ftrace_set_clr_event(buf, set);
	if (ret)
		goto out_free;

	ret = read;

 out_free:
	kfree(buf);

	return ret;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_call *next = call;

	(*pos)++;

	for (;;) {
		if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
			return NULL;

		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->regfunc)
			break;

		call++;
		next = call;
	}

	m->private = ++next;

	return call;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	return t_next(m, NULL, pos);
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_call *next;

	(*pos)++;

 retry:
	if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
		return NULL;

	if (!call->enabled) {
		call++;
		goto retry;
	}

	next = call;
	m->private = ++next;

	return call;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	return s_next(m, NULL, pos);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	int ret;
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	ret = seq_open(file, seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = __start_ftrace_events;
	}
	return ret;
}
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}

#undef FIELD
#define FIELD(type, name) \
	#type, "common_" #name, offsetof(typeof(field), name), \
	sizeof(field.name)

static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\n",
				FIELD(unsigned char, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
				FIELD(int, pid),
				FIELD(int, tgid));
}
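
On a typical build, the common header that trace_write_header() emits into each event's format file renders roughly like this (illustrative; exact offsets depend on structure packing):

	field:unsigned char common_type;	offset:0;	size:1;
	field:unsigned char common_flags;	offset:1;	size:1;
	field:unsigned char common_preempt_count;	offset:2;	size:1;
	field:int common_pid;	offset:4;	size:4;
	field:int common_tgid;	offset:8;	size:4;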
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(s);
	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	filter_print_preds(call->preds, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64], *pbuf = buf;
	struct filter_pred *pred;
	int err;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	pred = kzalloc(sizeof(*pred), GFP_KERNEL);
	if (!pred)
		return -ENOMEM;

	err = filter_parse(&pbuf, pred);
	if (err < 0) {
		filter_free_pred(pred);
		return err;
	}

	if (pred->clear) {
		filter_free_preds(call);
		filter_free_pred(pred);
		return cnt;
	}

	if (filter_add_pred(call, pred)) {
		filter_free_pred(pred);
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	filter_print_preds(system->preds, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char buf[64], *pbuf = buf;
	struct filter_pred *pred;
	int err;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	pred = kzalloc(sizeof(*pred), GFP_KERNEL);
	if (!pred)
		return -ENOMEM;

	err = filter_parse(&pbuf, pred);
	if (err < 0) {
		filter_free_pred(pred);
		return err;
	}

	if (pred->clear) {
		filter_free_subsystem_preds(system);
		filter_free_pred(pred);
		return cnt;
	}

	if (filter_add_subsystem_pred(system, pred)) {
		filter_free_subsystem_preds(system);
		filter_free_pred(pred);
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}
static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			return system->entry;
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->name = name;
	list_add(&system->list, &event_subsystems);

	system->preds = NULL;

	return system->entry;
}

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events)
{
	struct dentry *entry;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, "TRACE_SYSTEM") != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	if (call->raw_init) {
		ret = call->raw_init();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
	}

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc) {
		entry = debugfs_create_file("enable", 0644, call->dir, call,
					    &ftrace_enable_fops);
		if (!entry)
			pr_warning("Could not create debugfs "
				   "'%s/enable' entry\n", call->name);
	}

	if (call->id) {
		entry = debugfs_create_file("id", 0444, call->dir, call,
					    &ftrace_event_id_fops);
		if (!entry)
			pr_warning("Could not create debugfs '%s/id' entry\n",
				   call->name);
	}

	if (call->define_fields) {
		ret = call->define_fields();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		entry = debugfs_create_file("filter", 0644, call->dir, call,
					    &ftrace_event_filter_fops);
		if (!entry)
			pr_warning("Could not create debugfs "
				   "'%s/filter' entry\n", call->name);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	entry = debugfs_create_file("format", 0444, call->dir, call,
				    &ftrace_event_format_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'%s/format' entry\n", call->name);

	return 0;
}

static __init int event_trace_init(void)
{
	struct ftrace_event_call *call = __start_ftrace_events;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	for_each_event(call) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		event_create_dir(call, d_events);
	}

	return 0;
}
fs_initcall(event_trace_init);
kernel/trace/trace_events_filter.c (new file, 427 lines)
@@ -0,0 +1,427 @@
/*
 * trace_events_filter - generic event filtering
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
 */

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>

#include "trace.h"
#include "trace_output.h"

static int filter_pred_64(struct filter_pred *pred, void *event)
{
	u64 *addr = (u64 *)(event + pred->offset);
	u64 val = (u64)pred->val;
	int match;

	match = (val == *addr) ^ pred->not;

	return match;
}

static int filter_pred_32(struct filter_pred *pred, void *event)
{
	u32 *addr = (u32 *)(event + pred->offset);
	u32 val = (u32)pred->val;
	int match;

	match = (val == *addr) ^ pred->not;

	return match;
}

static int filter_pred_16(struct filter_pred *pred, void *event)
{
	u16 *addr = (u16 *)(event + pred->offset);
	u16 val = (u16)pred->val;
	int match;

	match = (val == *addr) ^ pred->not;

	return match;
}

static int filter_pred_8(struct filter_pred *pred, void *event)
{
	u8 *addr = (u8 *)(event + pred->offset);
	u8 val = (u8)pred->val;
	int match;

	match = (val == *addr) ^ pred->not;

	return match;
}

static int filter_pred_string(struct filter_pred *pred, void *event)
{
	char *addr = (char *)(event + pred->offset);
	int cmp, match;

	cmp = strncmp(addr, pred->str_val, pred->str_len);

	match = (!cmp) ^ pred->not;

	return match;
}

/* return 1 if event matches, 0 otherwise (discard) */
int filter_match_preds(struct ftrace_event_call *call, void *rec)
{
	int i, matched, and_failed = 0;
	struct filter_pred *pred;

	for (i = 0; i < MAX_FILTER_PRED; i++) {
		if (call->preds[i]) {
			pred = call->preds[i];
			if (and_failed && !pred->or)
				continue;
			matched = pred->fn(pred, rec);
			if (!matched && !pred->or) {
				and_failed = 1;
				continue;
			} else if (matched && pred->or)
				return 1;
		} else
			break;
	}

	if (and_failed)
		return 0;

	return 1;
}
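
filter_match_preds() evaluates the predicate list as an AND chain with OR escapes: a failed AND predicate poisons the chain unless a later OR predicate matches outright. A self-contained toy with the same control flow (toy_pred and the per-index record pairing are simplifications, not the kernel structures):

#include <stdio.h>

struct toy_pred { int val; int is_or; };

/* same shape as filter_match_preds(): AND chain with OR escapes */
static int toy_match(const struct toy_pred *p, int n, const int *rec)
{
	int i, and_failed = 0;

	for (i = 0; i < n; i++) {
		int matched = (rec[i] == p[i].val);

		if (and_failed && !p[i].is_or)
			continue;
		if (!matched && !p[i].is_or)
			and_failed = 1;
		else if (matched && p[i].is_or)
			return 1;
	}
	return !and_failed;
}

int main(void)
{
	/* equivalent of: a == 1 && b == 2 || c == 3 */
	struct toy_pred p[] = { {1, 0}, {2, 0}, {3, 1} };
	int rec_a[] = {1, 2, 0};	/* AND chain holds  -> match   */
	int rec_b[] = {9, 2, 0};	/* AND fails, no OR -> discard */
	int rec_c[] = {9, 2, 3};	/* OR pred rescues  -> match   */

	printf("%d %d %d\n", toy_match(p, 3, rec_a),
	       toy_match(p, 3, rec_b), toy_match(p, 3, rec_c));
	return 0;
}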
void filter_print_preds(struct filter_pred **preds, struct trace_seq *s)
{
	char *field_name;
	struct filter_pred *pred;
	int i;

	if (!preds) {
		trace_seq_printf(s, "none\n");
		return;
	}

	for (i = 0; i < MAX_FILTER_PRED; i++) {
		if (preds[i]) {
			pred = preds[i];
			field_name = pred->field_name;
			if (i)
				trace_seq_printf(s, pred->or ? "|| " : "&& ");
			trace_seq_printf(s, "%s ", field_name);
			trace_seq_printf(s, pred->not ? "!= " : "== ");
			if (pred->str_val)
				trace_seq_printf(s, "%s\n", pred->str_val);
			else
				trace_seq_printf(s, "%llu\n", pred->val);
		} else
			break;
	}
}

static struct ftrace_event_field *
find_event_field(struct ftrace_event_call *call, char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, &call->fields, link) {
		if (!strcmp(field->name, name))
			return field;
	}

	return NULL;
}

void filter_free_pred(struct filter_pred *pred)
{
	if (!pred)
		return;

	kfree(pred->field_name);
	kfree(pred->str_val);
	kfree(pred);
}

void filter_free_preds(struct ftrace_event_call *call)
{
	int i;

	if (call->preds) {
		for (i = 0; i < MAX_FILTER_PRED; i++)
			filter_free_pred(call->preds[i]);
		kfree(call->preds);
		call->preds = NULL;
	}
}

void filter_free_subsystem_preds(struct event_subsystem *system)
{
	struct ftrace_event_call *call = __start_ftrace_events;
	int i;

	if (system->preds) {
		for (i = 0; i < MAX_FILTER_PRED; i++)
			filter_free_pred(system->preds[i]);
		kfree(system->preds);
		system->preds = NULL;
	}

	events_for_each(call) {
		if (!call->name || !call->regfunc)
			continue;

		if (!strcmp(call->system, system->name))
			filter_free_preds(call);
	}
}

static int __filter_add_pred(struct ftrace_event_call *call,
			     struct filter_pred *pred)
{
	int i;

	if (call->preds && !pred->compound)
		filter_free_preds(call);

	if (!call->preds) {
		call->preds = kzalloc(MAX_FILTER_PRED * sizeof(pred),
				      GFP_KERNEL);
		if (!call->preds)
			return -ENOMEM;
	}

	for (i = 0; i < MAX_FILTER_PRED; i++) {
		if (!call->preds[i]) {
			call->preds[i] = pred;
			return 0;
		}
	}

	return -ENOMEM;
}

static int is_string_field(const char *type)
{
	if (strchr(type, '[') && strstr(type, "char"))
		return 1;

	return 0;
}

int filter_add_pred(struct ftrace_event_call *call, struct filter_pred *pred)
{
	struct ftrace_event_field *field;

	field = find_event_field(call, pred->field_name);
	if (!field)
		return -EINVAL;

	pred->offset = field->offset;

	if (is_string_field(field->type)) {
		if (!pred->str_val)
			return -EINVAL;
		pred->fn = filter_pred_string;
		pred->str_len = field->size;
		return __filter_add_pred(call, pred);
	} else {
		if (pred->str_val)
			return -EINVAL;
	}

	switch (field->size) {
	case 8:
		pred->fn = filter_pred_64;
		break;
	case 4:
		pred->fn = filter_pred_32;
		break;
	case 2:
		pred->fn = filter_pred_16;
		break;
	case 1:
		pred->fn = filter_pred_8;
		break;
	default:
		return -EINVAL;
	}

	return __filter_add_pred(call, pred);
}

static struct filter_pred *copy_pred(struct filter_pred *pred)
{
	struct filter_pred *new_pred = kmalloc(sizeof(*pred), GFP_KERNEL);
	if (!new_pred)
		return NULL;

	memcpy(new_pred, pred, sizeof(*pred));

	if (pred->field_name) {
		new_pred->field_name = kstrdup(pred->field_name, GFP_KERNEL);
		if (!new_pred->field_name) {
			kfree(new_pred);
			return NULL;
		}
	}

	if (pred->str_val) {
		new_pred->str_val = kstrdup(pred->str_val, GFP_KERNEL);
		if (!new_pred->str_val) {
			filter_free_pred(new_pred);
			return NULL;
		}
	}

	return new_pred;
}

int filter_add_subsystem_pred(struct event_subsystem *system,
			      struct filter_pred *pred)
{
	struct ftrace_event_call *call = __start_ftrace_events;
	struct filter_pred *event_pred;
	int i;

	if (system->preds && !pred->compound)
		filter_free_subsystem_preds(system);

	if (!system->preds) {
		system->preds = kzalloc(MAX_FILTER_PRED * sizeof(pred),
					GFP_KERNEL);
		if (!system->preds)
			return -ENOMEM;
	}

	for (i = 0; i < MAX_FILTER_PRED; i++) {
		if (!system->preds[i]) {
			system->preds[i] = pred;
			break;
		}
	}

	if (i == MAX_FILTER_PRED)
		return -EINVAL;

	events_for_each(call) {
		int err;

		if (!call->name || !call->regfunc)
			continue;

		if (strcmp(call->system, system->name))
			continue;

		if (!find_event_field(call, pred->field_name))
			continue;

		event_pred = copy_pred(pred);
		if (!event_pred)
			goto oom;

		err = filter_add_pred(call, event_pred);
		if (err)
			filter_free_pred(event_pred);
		if (err == -ENOMEM)
			goto oom;
	}

	return 0;

oom:
	system->preds[i] = NULL;
	return -ENOMEM;
}

int filter_parse(char **pbuf, struct filter_pred *pred)
{
	char *tmp, *tok, *val_str = NULL;
	int tok_n = 0;

	/* field ==/!= number, or/and field ==/!= number, number */
	while ((tok = strsep(pbuf, " \n"))) {
		if (tok_n == 0) {
			if (!strcmp(tok, "0")) {
				pred->clear = 1;
				return 0;
			} else if (!strcmp(tok, "&&")) {
				pred->or = 0;
				pred->compound = 1;
			} else if (!strcmp(tok, "||")) {
				pred->or = 1;
				pred->compound = 1;
			} else
				pred->field_name = tok;
			tok_n = 1;
			continue;
		}
		if (tok_n == 1) {
			if (!pred->field_name)
				pred->field_name = tok;
			else if (!strcmp(tok, "!="))
				pred->not = 1;
			else if (!strcmp(tok, "=="))
				pred->not = 0;
			else {
				pred->field_name = NULL;
				return -EINVAL;
			}
			tok_n = 2;
			continue;
		}
		if (tok_n == 2) {
			if (pred->compound) {
				if (!strcmp(tok, "!="))
					pred->not = 1;
				else if (!strcmp(tok, "=="))
					pred->not = 0;
				else {
					pred->field_name = NULL;
					return -EINVAL;
				}
			} else {
				val_str = tok;
				break; /* done */
			}
			tok_n = 3;
			continue;
		}
		if (tok_n == 3) {
			val_str = tok;
			break; /* done */
		}
	}

	pred->field_name = kstrdup(pred->field_name, GFP_KERNEL);
	if (!pred->field_name)
		return -ENOMEM;

	pred->val = simple_strtoull(val_str, &tmp, 10);
	if (tmp == val_str) {
		pred->str_val = kstrdup(val_str, GFP_KERNEL);
		if (!pred->str_val)
			return -ENOMEM;
	}

	return 0;
}
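
filter_parse() consumes space-separated token triples, with an optional leading '&&'/'||' marking a compound predicate and a lone '0' meaning "clear filters". An illustrative trace of two inputs (field names are hypothetical):

/*
 *   input: "next_pid != 2"
 *     tok 0: "next_pid" -> pred->field_name
 *     tok 1: "!="       -> pred->not = 1
 *     tok 2: "2"        -> pred->val = 2 (numeric, so no str_val)
 *
 *   input: "|| func == try_to_wake_up"
 *     tok 0: "||"       -> pred->or = 1, pred->compound = 1
 *     tok 1: "func"     -> pred->field_name
 *     tok 2: "=="       -> pred->not = 0
 *     tok 3: "try_to_wake_up" -> non-numeric, kept in pred->str_val
 */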
kernel/trace/trace_events_stage_1.h (new file, 39 lines)
@@ -0,0 +1,39 @@
/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_event_types.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry	ent;
 *	<type>			<item>;
 *	<type2>			<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#undef TRACE_FORMAT
#define TRACE_FORMAT(call, proto, args, fmt)

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __field
#define __field(type, item)		type	item;

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {				\
		struct trace_entry	ent;			\
		tstruct						\
	};							\
	static struct ftrace_event_call event_##name

#include <trace/trace_event_types.h>
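
Concretely, after stage 1 a hypothetical event declared with an int field and a char array reduces to an ordinary struct plus a forward declaration (sketch, not code from the patch):

/* TRACE_EVENT(foo, ...,
 *     TP_STRUCT__entry(
 *         __field(int, bar)
 *         __array(char, name, 16)
 *     ), ...)
 * expands to:
 */
struct ftrace_raw_foo {
	struct trace_entry ent;
	int bar;
	char name[16];
};
static struct ftrace_event_call event_foo;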
kernel/trace/trace_events_stage_2.h (new file, 176 lines)
@@ -0,0 +1,176 @@
/*
 * Stage 2 of the trace events.
 *
 * Override the macros in <trace/trace_event_types.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field;	<-- defined in stage 1
 *	struct trace_entry *entry;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>.id) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	ret = trace_seq_printf(s, <TP_printk> "\n");
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
enum print_line_t							\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.id) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	ret = trace_seq_printf(s, #call ": " print);			\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}

#include <trace/trace_event_types.h>

/*
 * Setup the showing format of trace point.
 *
 * int
 * ftrace_format_##call(struct trace_seq *s)
 * {
 *	struct ftrace_raw_##call field;
 *	int ret;
 *
 *	ret = trace_seq_printf(s, #type " " #item ";"
 *			       " offset:%u; size:%u;\n",
 *			       offsetof(struct ftrace_raw_##call, item),
 *			       sizeof(field.type));
 *
 * }
 */

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef __field
#define __field(type, item)						\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
	if (!ret)							\
		return 0;

#undef __array
#define __array(type, item, len)					\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
	if (!ret)							\
		return 0;

#undef __entry
#define __entry "REC"

#undef TP_printk
#define TP_printk(fmt, args...) "%s, %s\n", #fmt, #args

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
static int								\
ftrace_format_##call(struct trace_seq *s)				\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	tstruct;							\
									\
	trace_seq_printf(s, "\nprint fmt: " print);			\
									\
	return ret;							\
}

#include <trace/trace_event_types.h>

#undef __field
#define __field(type, item)						\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item));			\
	if (ret)							\
		return ret;

#undef __array
#define __array(type, item, len)					\
	ret = trace_define_field(event_call, #type "[" #len "]", #item, \
				 offsetof(typeof(field), item),		\
				 sizeof(field.item));			\
	if (ret)							\
		return ret;

#define __common_field(type, item)					\
	ret = trace_define_field(event_call, #type, "common_" #item,	\
				 offsetof(typeof(field.ent), item),	\
				 sizeof(field.ent.item));		\
	if (ret)							\
		return ret;

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
int									\
ftrace_define_fields_##call(void)					\
{									\
	struct ftrace_raw_##call field;					\
	struct ftrace_event_call *event_call = &event_##call;		\
	int ret;							\
									\
	__common_field(unsigned char, type);				\
	__common_field(unsigned char, flags);				\
	__common_field(unsigned char, preempt_count);			\
	__common_field(int, pid);					\
	__common_field(int, tgid);					\
									\
	tstruct;							\
									\
	return ret;							\
}

#include <trace/trace_event_types.h>
kernel/trace/trace_events_stage_3.h (new file, 281 lines)
@@ -0,0 +1,281 @@
/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_event_types.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_event_<call>);
 *	if (!ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 * For those macros defined with TRACE_FORMAT:
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name		= "<call>",
 *	.regfunc	= ftrace_reg_event_<call>,
 *	.unregfunc	= ftrace_unreg_event_<call>,
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(event_<call>.id,
 *				  sizeof(struct ftrace_raw_<call>),
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	<assign>; <-- Here we assign the entries by the __field and
 *		      __array macros.
 *
 *	trace_current_buffer_unlock_commit(event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (!ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace		= ftrace_raw_output_<call>, <-- stage 2
 * };
 *
 * static int ftrace_raw_init_event_<call>(void)
 * {
 *	int id;
 *
 *	id = register_ftrace_event(&ftrace_event_type_<call>);
 *	if (!id)
 *		return -ENODEV;
 *	event_<call>.id = id;
 *	return 0;
 * }
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name		= "<call>",
 *	.system		= "<system>",
 *	.raw_init	= ftrace_raw_init_event_<call>,
 *	.regfunc	= ftrace_reg_event_<call>,
 *	.unregfunc	= ftrace_unreg_event_<call>,
 *	.show_format	= ftrace_format_<call>,
 * }
 *
 */

#undef TP_FMT
#define TP_FMT(fmt, args...)	fmt "\n", ##args

#ifdef CONFIG_EVENT_PROFILE
#define _TRACE_PROFILE(call, proto, args)				\
static void ftrace_profile_##call(proto)				\
{									\
	extern void perf_tpcounter_event(int);				\
	perf_tpcounter_event(event_##call.id);				\
}									\
									\
static int ftrace_profile_enable_##call(struct ftrace_event_call *call) \
{									\
	int ret = 0;							\
									\
	if (!atomic_inc_return(&call->profile_count))			\
		ret = register_trace_##call(ftrace_profile_##call);	\
									\
	return ret;							\
}									\
									\
static void ftrace_profile_disable_##call(struct ftrace_event_call *call) \
{									\
	if (atomic_add_negative(-1, &call->profile_count))		\
		unregister_trace_##call(ftrace_profile_##call);		\
}

#define _TRACE_PROFILE_INIT(call)					\
	.profile_count = ATOMIC_INIT(-1),				\
	.profile_enable = ftrace_profile_enable_##call,			\
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE(call, proto, args)
#define _TRACE_PROFILE_INIT(call)
#endif

#define _TRACE_FORMAT(call, proto, args, fmt)				\
static void ftrace_event_##call(proto)					\
{									\
	event_trace_printk(_RET_IP_, #call ": " fmt);			\
}									\
									\
static int ftrace_reg_event_##call(void)				\
{									\
	int ret;							\
									\
	ret = register_trace_##call(ftrace_event_##call);		\
	if (ret)							\
		pr_info("event trace: Could not activate trace point "	\
			"probe to " #call "\n");			\
	return ret;							\
}									\
									\
static void ftrace_unreg_event_##call(void)				\
{									\
	unregister_trace_##call(ftrace_event_##call);			\
}									\
									\
static struct ftrace_event_call event_##call;				\
									\
static int ftrace_init_event_##call(void)				\
{									\
	int id;								\
									\
	id = register_ftrace_event(NULL);				\
	if (!id)							\
		return -ENODEV;						\
	event_##call.id = id;						\
	return 0;							\
}

#undef TRACE_FORMAT
#define TRACE_FORMAT(call, proto, args, fmt)				\
_TRACE_FORMAT(call, PARAMS(proto), PARAMS(args), PARAMS(fmt))		\
_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args))			\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name		= #call,					\
	.system		= __stringify(TRACE_SYSTEM),			\
	.raw_init	= ftrace_init_event_##call,			\
	.regfunc	= ftrace_reg_event_##call,			\
	.unregfunc	= ftrace_unreg_event_##call,			\
	_TRACE_PROFILE_INIT(call)					\
}

#undef __entry
#define __entry entry

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args))			\
									\
static struct ftrace_event_call event_##call;				\
									\
static void ftrace_raw_event_##call(proto)				\
{									\
	struct ftrace_event_call *call = &event_##call;			\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	unsigned long irq_flags;					\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	event = trace_current_buffer_lock_reserve(event_##call.id,	\
				  sizeof(struct ftrace_raw_##call),	\
				  irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry = ring_buffer_event_data(event);				\
									\
	assign;								\
									\
	if (call->preds && !filter_match_preds(call, entry))		\
		ring_buffer_event_discard(event);			\
									\
	trace_nowake_buffer_unlock_commit(event, irq_flags, pc);	\
									\
}									\
									\
static int ftrace_raw_reg_event_##call(void)				\
{									\
	int ret;							\
									\
	ret = register_trace_##call(ftrace_raw_event_##call);		\
	if (ret)							\
		pr_info("event trace: Could not activate trace point "	\
			"probe to " #call "\n");			\
	return ret;							\
}									\
									\
static void ftrace_raw_unreg_event_##call(void)				\
{									\
	unregister_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static struct trace_event ftrace_event_type_##call = {			\
	.trace		= ftrace_raw_output_##call,			\
};									\
									\
static int ftrace_raw_init_event_##call(void)				\
{									\
	int id;								\
									\
	id = register_ftrace_event(&ftrace_event_type_##call);		\
	if (!id)							\
		return -ENODEV;						\
	event_##call.id = id;						\
	INIT_LIST_HEAD(&event_##call.fields);				\
	return 0;							\
}									\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name		= #call,					\
	.system		= __stringify(TRACE_SYSTEM),			\
	.raw_init	= ftrace_raw_init_event_##call,			\
	.regfunc	= ftrace_raw_reg_event_##call,			\
	.unregfunc	= ftrace_raw_unreg_event_##call,		\
	.show_format	= ftrace_format_##call,				\
	.define_fields	= ftrace_define_fields_##call,			\
	_TRACE_PROFILE_INIT(call)					\
}

#include <trace/trace_event_types.h>

#undef _TRACE_PROFILE
#undef _TRACE_PROFILE_INIT
kernel/trace/trace_export.c (new file, 102 lines)
@@ -0,0 +1,102 @@
/*
 * trace_export.c - export basic ftrace utilities to user space
 *
 * Copyright (C) 2009 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/stringify.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>

#include "trace_output.h"

#undef TRACE_STRUCT
#define TRACE_STRUCT(args...) args

#undef TRACE_FIELD
#define TRACE_FIELD(type, item, assign) \
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
			       "offset:%u;\tsize:%u;\n", \
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item)); \
	if (!ret) \
		return 0;

#undef TRACE_FIELD_SPECIAL
#define TRACE_FIELD_SPECIAL(type_item, item, cmd) \
	ret = trace_seq_printf(s, "\tfield special:" #type_item ";\t" \
			       "offset:%u;\tsize:%u;\n", \
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item)); \
	if (!ret) \
		return 0;

#undef TRACE_FIELD_ZERO_CHAR
#define TRACE_FIELD_ZERO_CHAR(item) \
	ret = trace_seq_printf(s, "\tfield: char " #item ";\t" \
			       "offset:%u;\tsize:0;\n", \
			       (unsigned int)offsetof(typeof(field), item)); \
	if (!ret) \
		return 0;

#undef TP_RAW_FMT
#define TP_RAW_FMT(args...) args

#undef TRACE_EVENT_FORMAT
#define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \
static int \
ftrace_format_##call(struct trace_seq *s) \
{ \
	struct args field; \
	int ret; \
\
	tstruct; \
\
	trace_seq_printf(s, "\nprint fmt: \"%s\"\n", tpfmt); \
\
	return ret; \
}

#include "trace_event_types.h"

#undef TRACE_ZERO_CHAR
#define TRACE_ZERO_CHAR(arg)

#undef TRACE_FIELD
#define TRACE_FIELD(type, item, assign)\
	entry->item = assign;

#undef TRACE_FIELD
#define TRACE_FIELD(type, item, assign)\
	entry->item = assign;

#undef TP_CMD
#define TP_CMD(cmd...) cmd

#undef TRACE_ENTRY
#define TRACE_ENTRY entry

#undef TRACE_FIELD_SPECIAL
#define TRACE_FIELD_SPECIAL(type_item, item, cmd) \
	cmd;

#undef TRACE_EVENT_FORMAT
#define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \
\
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
	.name = #call, \
	.id = proto, \
	.system = __stringify(TRACE_SYSTEM), \
	.show_format = ftrace_format_##call, \
}
#include "trace_event_types.h"

@@ -9,6 +9,7 @@
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
@@ -16,31 +17,29 @@

#include "trace.h"

static void start_function_trace(struct trace_array *tr)
/* function tracing enabled */
static int ftrace_function_enabled;

static struct trace_array *func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);

static int function_trace_init(struct trace_array *tr)
{
	func_trace = tr;
	tr->cpu = get_cpu();
	tracing_reset_online_cpus(tr);
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace();
}

static void stop_function_trace(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}

static int function_trace_init(struct trace_array *tr)
{
	start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	stop_function_trace(tr);
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
@@ -48,20 +47,358 @@ static void function_trace_start(struct trace_array *tr)
	tracing_reset_online_cpus(tr);
}

static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu, resched;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	ftrace_preempt_enable(resched);
}
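
/*
 * The data->disabled counter used above is the tracer's per-cpu
 * recursion guard: every entry into the callback increments it, and the
 * event is recorded only when this is the outermost entry on that cpu
 * (the counter moved 0 -> 1); any recursive call triggered while
 * recording sees a higher count and simply backs out.  A rough sketch
 * of the idiom in plain C11 -- illustrative only, with a thread-local
 * counter standing in for the per-cpu one and hypothetical
 * record_event() in place of trace_function():
 */
#include <stdatomic.h>

static _Thread_local atomic_int disabled;	/* stands in for data->disabled */

static void record_event(void) { /* write one entry into the trace buffer */ }

static void tracer_hook(void)
{
	/* only the outermost entry (old value 0) records */
	if (atomic_fetch_add(&disabled, 1) == 0)
		record_event();	/* recursion from here is suppressed */
	atomic_fetch_sub(&disabled, 1);
}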

static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
};

/* Our two options */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

static void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		register_ftrace_function(&trace_stack_ops);
	else
		register_ftrace_function(&trace_ops);

	ftrace_function_enabled = 1;
}

static void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;
	/* OK if they are not registered */
	unregister_ftrace_function(&trace_stack_ops);
	unregister_ftrace_function(&trace_ops);
}

static int func_set_flag(u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_FUNC_OPT_STACK) {
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			return 0;

		if (set) {
			unregister_ftrace_function(&trace_ops);
			register_ftrace_function(&trace_stack_ops);
		} else {
			unregister_ftrace_function(&trace_stack_ops);
			register_ftrace_function(&trace_ops);
		}

		return 0;
	}

	return -EINVAL;
}
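
/*
 * func_set_flag() is what makes the stack-trace variant switchable at
 * runtime: it swaps trace_stack_ops in or out while the function tracer
 * stays active.  Assuming debugfs is mounted in the usual place, the
 * option would be driven from user space roughly like this (paths and
 * session illustrative):
 *
 *	# cd /sys/kernel/debug/tracing
 *	# echo function > current_tracer
 *	# echo func_stack_trace > trace_options      (record a stack per hit)
 *	# echo nofunc_stack_trace > trace_options    (back to plain tracing)
 */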

static struct tracer function_trace __read_mostly =
{
	.name = "function",
	.init = function_trace_init,
	.reset = function_trace_reset,
	.start = function_trace_start,
	.wait_pipe = poll_wait_pipe,
	.flags = &func_flags,
	.set_flag = func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_function,
#endif
};

static __init int init_function_trace(void)
#ifdef CONFIG_DYNAMIC_FTRACE
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	return register_tracer(&function_trace);
	long *count = (long *)data;

	if (tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (!tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_off();
}

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data);

static struct ftrace_probe_ops traceon_probe_ops = {
	.func = ftrace_traceon,
	.print = ftrace_trace_onoff_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func = ftrace_traceoff,
	.print = ftrace_trace_onoff_print,
};

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	char str[KSYM_SYMBOL_LEN];
	long count = (long)data;

	kallsyms_lookup(ip, NULL, NULL, NULL, str);
	seq_printf(m, "%s:", str);

	if (ops == &traceon_probe_ops)
		seq_printf(m, "traceon");
	else
		seq_printf(m, "traceoff");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld", count);
	seq_putc(m, '\n');

	return 0;
}

static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	unregister_ftrace_function_probe_func(glob, ops);

	return 0;
}

static int
ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return ftrace_trace_onoff_unreg(glob+1, cmd, param);

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = strict_strtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret;
}
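
/*
 * The callback above implements the "<function>:traceon[:count]" /
 * "<function>:traceoff[:count]" syntax of set_ftrace_filter, with the
 * count carried in the probe's data pointer and -1 meaning unlimited.
 * A plausible session (illustrative; any traceable function works):
 *
 *	# echo 'schedule:traceoff:5' > set_ftrace_filter
 *		(stop tracing on each of the first 5 hits of schedule)
 *	# echo '!schedule:traceoff' > set_ftrace_filter
 *		(the '!' prefix takes the ftrace_trace_onoff_unreg() path
 *		 and removes the probe again)
 */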

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name = "traceon",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name = "traceoff",
	.func = ftrace_trace_onoff_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		unregister_ftrace_command(&ftrace_traceoff_cmd);
	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
device_initcall(init_function_trace);

@@ -1,7 +1,7 @@
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
@@ -12,6 +12,12 @@
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

struct fgraph_data {
	pid_t last_pid;
	int depth;
};

#define TRACE_GRAPH_INDENT 2

@@ -20,9 +26,11 @@
#define TRACE_GRAPH_PRINT_CPU 0x2
#define TRACE_GRAPH_PRINT_OVERHEAD 0x4
#define TRACE_GRAPH_PRINT_PROC 0x8
#define TRACE_GRAPH_PRINT_DURATION 0x10
#define TRACE_GRAPH_PRINT_ABS_TIME 0X20

static struct tracer_opt trace_opts[] = {
	/* Display overruns ? */
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
@@ -30,23 +38,28 @@ static struct tracer_opt trace_opts[] = {
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns and proc by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD,
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION,
	.opts = trace_opts
};

/* pid on the last trace processed */
static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };

/* Add a function return address to the trace stack on thread info.*/
int
ftrace_push_return_trace(unsigned long ret, unsigned long long time,
			 unsigned long func, int *depth)
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
{
	unsigned long long calltime;
	int index;

	if (!current->ret_stack)
@@ -58,11 +71,13 @@ ftrace_push_return_trace(unsigned long ret, unsigned long long time,
		return -EBUSY;
	}

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = time;
	current->ret_stack[index].calltime = calltime;
	*depth = index;

	return 0;
@@ -104,7 +119,7 @@ unsigned long ftrace_return_to_handler(void)
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret);
	trace.rettime = cpu_clock(raw_smp_processor_id());
	trace.rettime = trace_clock_local();
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
@@ -119,12 +134,7 @@ unsigned long ftrace_return_to_handler(void)

static int graph_trace_init(struct trace_array *tr)
{
	int cpu, ret;

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);

	ret = register_ftrace_graph(&trace_graph_return,
	int ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry);
	if (ret)
		return ret;
@@ -187,15 +197,15 @@ print_graph_cpu(struct trace_seq *s, int cpu)
static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	int i;
	int ret;
	int len;
	char comm[8];
	int spaces = 0;
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	strncpy(comm, trace_find_cmdline(pid), 7);
	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

@@ -228,17 +238,25 @@ print_graph_proc(struct trace_seq *s, pid_t pid)

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu)
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
	if (!data)
		return TRACE_TYPE_HANDLED;

	prev_pid = last_pid[cpu];
	last_pid[cpu] = pid;
	last_pid = &(per_cpu_ptr(data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;
/*
 * Context-switch trace line:

@@ -250,34 +268,34 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu)
	ret = trace_seq_printf(s,
		" ------------------------------------------\n");
	if (!ret)
		TRACE_TYPE_PARTIAL_LINE;
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		TRACE_TYPE_PARTIAL_LINE;
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		TRACE_TYPE_PARTIAL_LINE;
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, " => ");
	if (!ret)
		TRACE_TYPE_PARTIAL_LINE;
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		TRACE_TYPE_PARTIAL_LINE;
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		TRACE_TYPE_PARTIAL_LINE;
		return TRACE_TYPE_PARTIAL_LINE;

	return ret;
	return TRACE_TYPE_HANDLED;
}

static bool
trace_branch_is_leaf(struct trace_iterator *iter,
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct ring_buffer_iter *ring_iter;
@@ -286,65 +304,123 @@ trace_branch_is_leaf(struct trace_iterator *iter,

	ring_iter = iter->buffer_iter[iter->cpu];

	if (!ring_iter)
		return false;

	event = ring_buffer_iter_peek(ring_iter, NULL);
	/* First peek to compare current entry and the next one */
	if (ring_iter)
		event = ring_buffer_iter_peek(ring_iter, NULL);
	else {
		/* We need to consume the current entry to see the next one */
		ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
		event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
					NULL);
	}

	if (!event)
		return false;
		return NULL;

	next = ring_buffer_event_data(event);

	if (next->ent.type != TRACE_GRAPH_RET)
		return false;
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return false;
		return NULL;

	return true;
	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

/* Signal a overhead of time execution to the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
	/* If duration disappear, we don't need anything */
	if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
		return 1;

	/* Non nested entry or return */
	if (duration == -1)
		return trace_seq_printf(s, " ");

	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 msecs */
		if (duration > 100000ULL)
			return trace_seq_printf(s, "! ");

		/* Duration exceeded 10 msecs */
		if (duration > 10000ULL)
			return trace_seq_printf(s, "+ ");
	}

	return trace_seq_printf(s, " ");
}
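
/*
 * With funcgraph-overhead set, the two thresholds above annotate slow
 * functions next to the duration column: '+' past the 10000 threshold
 * and '!' past 100000.  The resulting trace lines look roughly like
 * this (numbers and spacing illustrative):
 *
 *	 1)   + 25.314 us   |        schedule();
 *	 1)   ! 236.714 us  |      }
 */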

static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu | ",
			(unsigned long)t, usecs_rem);
}

static enum print_line_t
print_graph_irq(struct trace_seq *s, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid)
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	if (type == TRACE_GRAPH_ENT) {
		ret = trace_seq_printf(s, "==========> | ");
	} else {
		/* Cpu */
		if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
			ret = print_graph_cpu(s, cpu);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* Proc */
		if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
			ret = print_graph_proc(s, pid);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;

			ret = trace_seq_printf(s, " | ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* No overhead */
		if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
			ret = trace_seq_printf(s, " ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		ret = trace_seq_printf(s, "<========== |\n");
	/* Absolute time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* No overhead */
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_printf(s, "==========>");
	else
		ret = trace_seq_printf(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Don't close the duration column if haven't one */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		trace_seq_printf(s, " |");
	ret = trace_seq_printf(s, "\n");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
@@ -363,7 +439,7 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s)
	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print msecs */
	ret = trace_seq_printf(s, msecs_str);
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

@@ -396,52 +472,47 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s)

}

/* Signal a overhead of time execution to the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
	/* Duration exceeded 100 msecs */
	if (duration > 100000ULL)
		return trace_seq_printf(s, "! ");

	/* Duration exceeded 10 msecs */
	if (duration > 10000ULL)
		return trace_seq_printf(s, "+ ");

	return trace_seq_printf(s, " ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
{
	struct ftrace_graph_ret_entry *ret_entry;
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ring_buffer_event *event;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
	ret_entry = ring_buffer_event_data(event);
	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	/* Overhead */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		ret = print_graph_overhead(duration, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	if (data) {
		int cpu = iter->cpu;
		int *depth = &(per_cpu_ptr(data, cpu)->depth);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		*depth = call->depth - 1;
	}

	/* Duration */
	ret = print_graph_duration(duration, s);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
	/* Overhead */
	ret = print_graph_overhead(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = print_graph_duration(duration, s);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
@@ -461,33 +532,34 @@ print_graph_entry_leaf(struct trace_iterator *iter,
}

static enum print_line_t
print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
			struct trace_seq *s, pid_t pid, int cpu)
print_graph_entry_nested(struct trace_iterator *iter,
			struct ftrace_graph_ent_entry *entry,
			struct trace_seq *s, int cpu)
{
	int i;
	int ret;
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	/* No overhead */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	if (data) {
		int cpu = iter->cpu;
		int *depth = &(per_cpu_ptr(data, cpu)->depth);

		*depth = call->depth;
	}

	/* Interrupt */
	ret = print_graph_irq(s, call->func, TRACE_GRAPH_ENT, cpu, pid);
	if (ret == TRACE_TYPE_UNHANDLED) {
	/* No time */
	/* No overhead */
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	} else {
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
@@ -503,20 +575,40 @@ print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, int cpu)
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr)
{
	int ret;
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Absolute time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
@@ -535,54 +627,65 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
		return TRACE_TYPE_PARTIAL_LINE;
	}

	if (trace_branch_is_leaf(iter, field))
		return print_graph_entry_leaf(iter, field, s);
	return 0;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter)
{
	int cpu = iter->cpu;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		return print_graph_entry_leaf(iter, field, leaf_ret, s);
	else
		return print_graph_entry_nested(field, s, iter->ent->pid, cpu);
		return print_graph_entry_nested(iter, field, s, cpu);

}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, int cpu)
		   struct trace_entry *ent, struct trace_iterator *iter)
{
	int i;
	int ret;
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int ret;
	int i;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
	if (data) {
		int cpu = iter->cpu;
		int *depth = &(per_cpu_ptr(data, cpu)->depth);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		*depth = trace->depth - 1;
	}

	if (print_graph_prologue(iter, s, 0, 0))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Overhead */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		ret = print_graph_overhead(duration, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	ret = print_graph_overhead(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	ret = print_graph_duration(duration, s);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = print_graph_duration(duration, s);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
@@ -603,7 +706,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(s, trace->func, TRACE_GRAPH_RET, cpu, ent->pid);
	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

@@ -611,61 +714,73 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
}

static enum print_line_t
print_graph_comment(struct print_entry *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter)
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter)
{
	int i;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	/* Pid */
	if (verif_pid(s, ent->pid, iter->cpu) == TRACE_TYPE_PARTIAL_LINE)
	if (data)
		depth = per_cpu_ptr(data, iter->cpu)->depth;

	if (print_graph_prologue(iter, s, 0, 0))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, iter->cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* No overhead */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* No time */
	ret = trace_seq_printf(s, " | ");
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Indentation */
	if (trace->depth > 0)
		for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) {
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_printf(s, " ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_printf(s, "/* %s", trace->buf);
	ret = trace_seq_printf(s, "/* ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (ent->flags & TRACE_FLAG_CONT)
		trace_seq_print_cont(s, iter);
	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->trace(iter, sym_flags);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	/* Strip ending newline */
	if (s->buffer[s->len - 1] == '\n') {
		s->buffer[s->len - 1] = '\0';
		s->len--;
	}

	ret = trace_seq_printf(s, " */\n");
	if (!ret)
@@ -678,62 +793,91 @@ print_graph_comment(struct print_entry *trace, struct trace_seq *s,
enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		struct ftrace_graph_ent_entry *field;
		trace_assign_type(field, entry);
		return print_graph_entry(field, s, iter,
					 iter->cpu);
		return print_graph_entry(field, s, iter);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter->cpu);
	}
	case TRACE_PRINT: {
		struct print_entry *field;
		trace_assign_type(field, entry);
		return print_graph_comment(field, s, entry, iter);
		return print_graph_return(&field->ret, s, entry, iter);
	}
	default:
		return TRACE_TYPE_UNHANDLED;
		return print_graph_comment(s, entry, iter);
	}

	return TRACE_TYPE_HANDLED;
}

static void print_graph_headers(struct seq_file *s)
{
	/* 1st line */
	seq_printf(s, "# ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, " TIME ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, "CPU ");
		seq_printf(s, "CPU");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "TASK/PID ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD)
		seq_printf(s, "OVERHEAD/");
	seq_printf(s, "DURATION FUNCTION CALLS\n");
		seq_printf(s, " TASK/PID ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, " DURATION ");
	seq_printf(s, " FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "# ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, " | ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, "| ");
		seq_printf(s, "| ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "| | ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		seq_printf(s, "| ");
		seq_printf(s, "| | | | |\n");
	} else
		seq_printf(s, " | | | | |\n");
		seq_printf(s, " | | ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, " | | ");
	seq_printf(s, " | | | |\n");
}
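
/*
 * With the default flag set (CPU, overhead and duration), the two
 * header lines built above come out roughly as follows (column
 * alignment illustrative; the exact spacing in the strings above was
 * collapsed in transit):
 *
 *	# CPU  DURATION                  FUNCTION CALLS
 *	# |     |   |                     |   |   |   |
 */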

static void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data = alloc_percpu(struct fgraph_data);
	int cpu;

	if (!data)
		pr_warning("function graph tracer: not enough memory\n");
	else
		for_each_possible_cpu(cpu) {
			pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid);
			int *depth = &(per_cpu_ptr(data, cpu)->depth);
			*pid = -1;
			*depth = 0;
		}

	iter->private = data;
}

static void graph_trace_close(struct trace_iterator *iter)
{
	free_percpu(iter->private);
}

static struct tracer graph_trace __read_mostly = {
	.name = "function_graph",
	.open = graph_trace_open,
	.close = graph_trace_close,
	.wait_pipe = poll_wait_pipe,
	.init = graph_trace_init,
	.reset = graph_trace_reset,
	.print_line = print_graph_function,
	.print_header = print_graph_headers,
	.flags = &tracer_flags,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_function_graph,
#endif
};

static __init int init_graph_trace(void)

|
/*
 * h/w branch tracer for x86 based on bts
 *
 * Copyright (C) 2008 Markus Metzger <markus.t.metzger@gmail.com>
 *
 * Copyright (C) 2008-2009 Intel Corporation.
 * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/fs.h>

#include <asm/ds.h>

#include "trace.h"
#include "trace_output.h"

#define SIZEOF_BTS (1 << 13)

/*
 * The tracer lock protects the below per-cpu tracer array.
 * It needs to be held to:
 * - start tracing on all cpus
 * - stop tracing on all cpus
 * - start tracing on a single hotplug cpu
 * - stop tracing on a single hotplug cpu
 * - read the trace from all cpus
 * - read the trace from a single cpu
 */
static DEFINE_SPINLOCK(bts_tracer_lock);
static DEFINE_PER_CPU(struct bts_tracer *, tracer);
static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);

#define this_tracer per_cpu(tracer, smp_processor_id())
#define this_buffer per_cpu(buffer, smp_processor_id())

static int __read_mostly trace_hw_branches_enabled;
static struct trace_array *hw_branch_trace __read_mostly;

/*
 * Start tracing on the current cpu.
 * The argument is ignored.
 *
 * pre: bts_tracer_lock must be locked.
 */
static void bts_trace_start_cpu(void *arg)
{
	if (this_tracer)
@@ -42,14 +65,20 @@ static void bts_trace_start_cpu(void *arg)

static void bts_trace_start(struct trace_array *tr)
{
	int cpu;
	spin_lock(&bts_tracer_lock);

	tracing_reset_online_cpus(tr);
	on_each_cpu(bts_trace_start_cpu, NULL, 1);
	trace_hw_branches_enabled = 1;

	for_each_cpu(cpu, cpu_possible_mask)
		smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
	spin_unlock(&bts_tracer_lock);
}

/*
 * Stop tracing on the current cpu.
 * The argument is ignored.
 *
 * pre: bts_tracer_lock must be locked.
 */
static void bts_trace_stop_cpu(void *arg)
{
	if (this_tracer) {
@@ -60,26 +89,60 @@ static void bts_trace_stop_cpu(void *arg)

static void bts_trace_stop(struct trace_array *tr)
{
	int cpu;
	spin_lock(&bts_tracer_lock);

	for_each_cpu(cpu, cpu_possible_mask)
		smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
	trace_hw_branches_enabled = 0;
	on_each_cpu(bts_trace_stop_cpu, NULL, 1);

	spin_unlock(&bts_tracer_lock);
}

static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	spin_lock(&bts_tracer_lock);

	if (!trace_hw_branches_enabled)
		goto out;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
		break;
	case CPU_DOWN_PREPARE:
		smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
		break;
	}

 out:
	spin_unlock(&bts_tracer_lock);
	return NOTIFY_DONE;
}

static struct notifier_block bts_hotcpu_notifier __cpuinitdata = {
	.notifier_call = bts_hotcpu_handler
};

static int bts_trace_init(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
	hw_branch_trace = tr;

	bts_trace_start(tr);

	return 0;
}

static void bts_trace_reset(struct trace_array *tr)
{
	bts_trace_stop(tr);
}

static void bts_trace_print_header(struct seq_file *m)
{
	seq_puts(m,
		 "# CPU# FROM TO FUNCTION\n");
	seq_puts(m,
		 "# | | | |\n");
	seq_puts(m, "# CPU# TO <- FROM\n");
}

static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
@@ -87,15 +150,15 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
	struct trace_entry *entry = iter->ent;
	struct trace_seq *seq = &iter->seq;
	struct hw_branch_entry *it;
	unsigned long symflags = TRACE_ITER_SYM_OFFSET;

	trace_assign_type(it, entry);

	if (entry->type == TRACE_HW_BRANCHES) {
		if (trace_seq_printf(seq, "%4d ", entry->cpu) &&
		    trace_seq_printf(seq, "0x%016llx -> 0x%016llx ",
				     it->from, it->to) &&
		    (!it->from ||
		     seq_print_ip_sym(seq, it->from, /* sym_flags = */ 0)) &&
		if (trace_seq_printf(seq, "%4d ", iter->cpu) &&
		    seq_print_ip_sym(seq, it->to, symflags) &&
		    trace_seq_printf(seq, "\t <- ") &&
		    seq_print_ip_sym(seq, it->from, symflags) &&
		    trace_seq_printf(seq, "\n"))
			return TRACE_TYPE_HANDLED;
		return TRACE_TYPE_PARTIAL_LINE;;
@@ -103,26 +166,42 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
	return TRACE_TYPE_UNHANDLED;
}

void trace_hw_branch(struct trace_array *tr, u64 from, u64 to)
void trace_hw_branch(u64 from, u64 to)
{
	struct trace_array *tr = hw_branch_trace;
	struct ring_buffer_event *event;
	struct hw_branch_entry *entry;
	unsigned long irq;
	unsigned long irq1;
	int cpu;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq);
	if (!event)
	if (unlikely(!tr))
		return;

	if (unlikely(!trace_hw_branches_enabled))
		return;

	local_irq_save(irq1);
	cpu = raw_smp_processor_id();
	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
		goto out;

	event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES,
					  sizeof(*entry), 0, 0);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, from);
	entry->ent.type = TRACE_HW_BRANCHES;
	entry->ent.cpu = smp_processor_id();
	entry->from = from;
	entry->to = to;
	ring_buffer_unlock_commit(tr->buffer, event, irq);
	trace_buffer_unlock_commit(tr, event, 0, 0);

 out:
	atomic_dec(&tr->data[cpu]->disabled);
	local_irq_restore(irq1);
}

static void trace_bts_at(struct trace_array *tr,
			 const struct bts_trace *trace, void *at)
static void trace_bts_at(const struct bts_trace *trace, void *at)
{
	struct bts_struct bts;
	int err = 0;
@@ -137,18 +216,29 @@ static void trace_bts_at(struct trace_array *tr,

	switch (bts.qualifier) {
	case BTS_BRANCH:
		trace_hw_branch(tr, bts.variant.lbr.from, bts.variant.lbr.to);
		trace_hw_branch(bts.variant.lbr.from, bts.variant.lbr.to);
		break;
	}
}

/*
 * Collect the trace on the current cpu and write it into the ftrace buffer.
 *
 * pre: bts_tracer_lock must be locked
 */
static void trace_bts_cpu(void *arg)
{
	struct trace_array *tr = (struct trace_array *) arg;
	const struct bts_trace *trace;
	unsigned char *at;

	if (!this_tracer)
	if (unlikely(!tr))
		return;

	if (unlikely(atomic_read(&tr->data[raw_smp_processor_id()]->disabled)))
		return;

	if (unlikely(!this_tracer))
		return;

	ds_suspend_bts(this_tracer);
@@ -158,11 +248,11 @@ static void trace_bts_cpu(void *arg)

	for (at = trace->ds.top; (void *)at < trace->ds.end;
	     at += trace->ds.size)
		trace_bts_at(tr, trace, at);
		trace_bts_at(trace, at);

	for (at = trace->ds.begin; (void *)at < trace->ds.top;
	     at += trace->ds.size)
		trace_bts_at(tr, trace, at);
		trace_bts_at(trace, at);

out:
	ds_resume_bts(this_tracer);
@@ -170,26 +260,43 @@ out:

static void trace_bts_prepare(struct trace_iterator *iter)
{
	int cpu;
	spin_lock(&bts_tracer_lock);

	for_each_cpu(cpu, cpu_possible_mask)
		smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1);
	on_each_cpu(trace_bts_cpu, iter->tr, 1);

	spin_unlock(&bts_tracer_lock);
}

static void trace_bts_close(struct trace_iterator *iter)
{
	tracing_reset_online_cpus(iter->tr);
}

void trace_hw_branch_oops(void)
{
	spin_lock(&bts_tracer_lock);

	trace_bts_cpu(hw_branch_trace);

	spin_unlock(&bts_tracer_lock);
}

struct tracer bts_tracer __read_mostly =
{
	.name = "hw-branch-tracer",
	.init = bts_trace_init,
	.reset = bts_trace_stop,
	.reset = bts_trace_reset,
	.print_header = bts_trace_print_header,
	.print_line = bts_trace_print_line,
	.start = bts_trace_start,
	.stop = bts_trace_stop,
	.open = trace_bts_prepare
	.open = trace_bts_prepare,
	.close = trace_bts_close
};

__init static int init_bts_trace(void)
{
	register_hotcpu_notifier(&bts_hotcpu_notifier);
	return register_tracer(&bts_tracer);
}
device_initcall(init_bts_trace);

@@ -1,5 +1,5 @@
/*
 * trace irqs off criticall timings
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
@@ -32,6 +32,8 @@ enum {

static int trace_type __read_mostly;

static int save_lat_flag;

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
@@ -95,7 +97,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, data, ip, parent_ip, flags, preempt_count());
		trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}
@@ -153,7 +155,7 @@ check_critical_timing(struct trace_array *tr,
	if (!report_latency(delta))
		goto out_unlock;

	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);

	latency = nsecs_to_usecs(delta);

@@ -177,7 +179,7 @@ out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_reset(tr, cpu);
	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

static inline void
@@ -210,7 +212,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)

	local_save_flags(flags);

	trace_function(tr, data, ip, parent_ip, flags, preempt_count());
	trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

@@ -244,7 +246,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
	atomic_inc(&data->disabled);

	local_save_flags(flags);
	trace_function(tr, data, ip, parent_ip, flags, preempt_count());
	trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
@@ -353,33 +355,26 @@ void trace_preempt_off(unsigned long a0, unsigned long a1)
}
#endif /* CONFIG_PREEMPT_TRACER */

/*
 * save_tracer_enabled is used to save the state of the tracer_enabled
 * variable when we disable it when we open a trace output file.
 */
static int save_tracer_enabled;

static void start_irqsoff_tracer(struct trace_array *tr)
{
	register_ftrace_function(&trace_ops);
	if (tracing_is_enabled()) {
	if (tracing_is_enabled())
		tracer_enabled = 1;
		save_tracer_enabled = 1;
	} else {
	else
		tracer_enabled = 0;
		save_tracer_enabled = 0;
	}
}

static void stop_irqsoff_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	save_tracer_enabled = 0;
	unregister_ftrace_function(&trace_ops);
}

static void __irqsoff_tracer_init(struct trace_array *tr)
{
	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
	trace_flags |= TRACE_ITER_LATENCY_FMT;

	tracing_max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
@@ -390,30 +385,19 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
static void irqsoff_tracer_reset(struct trace_array *tr)
{
	stop_irqsoff_tracer(tr);

	if (!save_lat_flag)
		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
	save_tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
	save_tracer_enabled = 0;
}

static void irqsoff_tracer_open(struct trace_iterator *iter)
{
	/* stop the trace while dumping */
	tracer_enabled = 0;
}

static void irqsoff_tracer_close(struct trace_iterator *iter)
{
	/* restart tracing */
	tracer_enabled = save_tracer_enabled;
}

#ifdef CONFIG_IRQSOFF_TRACER
@@ -431,8 +415,6 @@ static struct tracer irqsoff_tracer __read_mostly =
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.open		= irqsoff_tracer_open,
	.close		= irqsoff_tracer_close,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
@@ -459,8 +441,6 @@ static struct tracer preemptoff_tracer __read_mostly =
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.open		= irqsoff_tracer_open,
	.close		= irqsoff_tracer_close,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
@@ -489,8 +469,6 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.open		= irqsoff_tracer_open,
	.close		= irqsoff_tracer_close,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
@@ -12,6 +12,7 @@
#include <asm/atomic.h>

#include "trace.h"
#include "trace_output.h"

struct header_iter {
	struct pci_dev *dev;
@@ -183,21 +184,22 @@ static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
	switch (rw->opcode) {
	case MMIO_READ:
		ret = trace_seq_printf(s,
			"R %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
			"R %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
			rw->width, secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			rw->value, rw->pc, 0);
		break;
	case MMIO_WRITE:
		ret = trace_seq_printf(s,
			"W %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
			"W %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
			rw->width, secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			rw->value, rw->pc, 0);
		break;
	case MMIO_UNKNOWN_OP:
		ret = trace_seq_printf(s,
			"UNKNOWN %lu.%06lu %d 0x%llx %02x,%02x,%02x 0x%lx %d\n",
			"UNKNOWN %u.%06lu %d 0x%llx %02lx,%02lx,"
			"%02lx 0x%lx %d\n",
			secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			(rw->value >> 16) & 0xff, (rw->value >> 8) & 0xff,
@@ -229,14 +231,14 @@ static enum print_line_t mmio_print_map(struct trace_iterator *iter)
	switch (m->opcode) {
	case MMIO_PROBE:
		ret = trace_seq_printf(s,
			"MAP %lu.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n",
			"MAP %u.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n",
			secs, usec_rem, m->map_id,
			(unsigned long long)m->phys, m->virt, m->len,
			0UL, 0);
		break;
	case MMIO_UNPROBE:
		ret = trace_seq_printf(s,
			"UNMAP %lu.%06lu %d 0x%lx %d\n",
			"UNMAP %u.%06lu %d 0x%lx %d\n",
			secs, usec_rem, m->map_id, 0UL, 0);
		break;
	default:
@@ -255,18 +257,15 @@ static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
	const char *msg = print->buf;
	struct trace_seq *s = &iter->seq;
	unsigned long long t = ns2usecs(iter->ts);
	unsigned long usec_rem = do_div(t, 1000000ULL);
	unsigned long usec_rem = do_div(t, USEC_PER_SEC);
	unsigned secs = (unsigned long)t;
	int ret;

	/* The trailing newline must be in the message. */
	ret = trace_seq_printf(s, "MARK %lu.%06lu %s", secs, usec_rem, msg);
	ret = trace_seq_printf(s, "MARK %u.%06lu %s", secs, usec_rem, msg);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (entry->flags & TRACE_FLAG_CONT)
		trace_seq_print_cont(s, iter);

	return TRACE_TYPE_HANDLED;
}

@@ -308,21 +307,17 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
{
	struct ring_buffer_event *event;
	struct trace_mmiotrace_rw *entry;
	unsigned long irq_flags;
	int pc = preempt_count();

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	event = trace_buffer_lock_reserve(tr, TRACE_MMIO_RW,
					  sizeof(*entry), 0, pc);
	if (!event) {
		atomic_inc(&dropped_count);
		return;
	}
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
	entry->ent.type = TRACE_MMIO_RW;
	entry->rw = *rw;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

	trace_wake_up();
	trace_buffer_unlock_commit(tr, event, 0, pc);
}

void mmio_trace_rw(struct mmiotrace_rw *rw)
@@ -338,21 +333,17 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
{
	struct ring_buffer_event *event;
	struct trace_mmiotrace_map *entry;
	unsigned long irq_flags;
	int pc = preempt_count();

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	event = trace_buffer_lock_reserve(tr, TRACE_MMIO_MAP,
					  sizeof(*entry), 0, pc);
	if (!event) {
		atomic_inc(&dropped_count);
		return;
	}
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
	entry->ent.type = TRACE_MMIO_MAP;
	entry->map = *map;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

	trace_wake_up();
	trace_buffer_unlock_commit(tr, event, 0, pc);
}

void mmio_trace_mapping(struct mmiotrace_map *map)
@@ -368,5 +359,5 @@ void mmio_trace_mapping(struct mmiotrace_map *map)

int mmio_trace_printk(const char *fmt, va_list args)
{
	return trace_vprintk(0, -1, fmt, args);
	return trace_vprintk(0, fmt, args);
}

@@ -47,12 +47,7 @@ static void stop_nop_trace(struct trace_array *tr)

static int nop_trace_init(struct trace_array *tr)
{
	int cpu;
	ctx_trace = tr;

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);

	start_nop_trace(tr);
	return 0;
}
@@ -96,6 +91,7 @@ struct tracer nop_trace __read_mostly =
	.name		= "nop",
	.init		= nop_trace_init,
	.reset		= nop_trace_reset,
	.wait_pipe	= poll_wait_pipe,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_nop,
#endif

kernel/trace/trace_output.c (new file, 1017 lines)
File diff suppressed because it is too large

kernel/trace/trace_output.h (new file, 71 lines)
@@ -0,0 +1,71 @@
#ifndef __TRACE_EVENTS_H
#define __TRACE_EVENTS_H

#include "trace.h"

typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
					      int flags);

struct trace_event {
	struct hlist_node	node;
	int			type;
	trace_print_func	trace;
	trace_print_func	raw;
	trace_print_func	hex;
	trace_print_func	binary;
};

extern enum print_line_t
trace_print_bprintk_msg_only(struct trace_iterator *iter);
extern enum print_line_t
trace_print_printk_msg_only(struct trace_iterator *iter);

extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
	__attribute__ ((format (printf, 2, 3)));
extern int
trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
extern int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
		 unsigned long sym_flags);
extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
				 size_t cnt);
extern int trace_seq_puts(struct trace_seq *s, const char *str);
extern int trace_seq_putc(struct trace_seq *s, unsigned char c);
extern int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len);
extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
				size_t len);
extern void *trace_seq_reserve(struct trace_seq *s, size_t len);
extern int trace_seq_path(struct trace_seq *s, struct path *path);
extern int seq_print_userip_objs(const struct userstack_entry *entry,
				 struct trace_seq *s, unsigned long sym_flags);
extern int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
			     unsigned long ip, unsigned long sym_flags);

extern int trace_print_context(struct trace_iterator *iter);
extern int trace_print_lat_context(struct trace_iterator *iter);

extern struct trace_event *ftrace_find_event(int type);
extern int register_ftrace_event(struct trace_event *event);
extern int unregister_ftrace_event(struct trace_event *event);

extern enum print_line_t trace_nop_print(struct trace_iterator *iter,
					 int flags);

#define MAX_MEMHEX_BYTES	8
#define HEX_CHARS		(MAX_MEMHEX_BYTES*2 + 1)

#define SEQ_PUT_FIELD_RET(s, x)				\
do {							\
	if (!trace_seq_putmem(s, &(x), sizeof(x)))	\
		return TRACE_TYPE_PARTIAL_LINE;		\
} while (0)

#define SEQ_PUT_HEX_FIELD_RET(s, x)			\
do {							\
	BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES);	\
	if (!trace_seq_putmem_hex(s, &(x), sizeof(x)))	\
		return TRACE_TYPE_PARTIAL_LINE;		\
} while (0)

#endif
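
For reference, the registration side of this header can be exercised roughly as in the sketch below; this is illustrative only, and TRACE_MYEVENT plus the my_* names are hypothetical stand-ins, not identifiers from this series.

/*
 * Hypothetical tracer registering its own output handler.
 */
#include "trace.h"
#include "trace_output.h"

static enum print_line_t my_event_trace(struct trace_iterator *iter,
					int flags)
{
	struct trace_seq *s = &iter->seq;

	/* trace_seq_puts() returns 0 when the seq buffer is full */
	if (!trace_seq_puts(s, "my event fired\n"))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static struct trace_event my_trace_event = {
	.type	= TRACE_MYEVENT,	/* hypothetical entry type id */
	.trace	= my_event_trace,	/* human-readable output */
	/* .raw, .hex and .binary left NULL: nop handlers are used */
};

static __init int my_event_init(void)
{
	/* register_ftrace_event() hashes the event by its type id */
	return register_ftrace_event(&my_trace_event) ? 0 : -EBUSY;
}

When the trace buffer is read, ftrace_find_event() looks the entry type up and dispatches to the matching trace_print_func.
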
@@ -11,15 +11,113 @@

#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <trace/power.h>
#include <linux/kallsyms.h>
#include <linux/module.h>

#include "trace.h"
#include "trace_output.h"

static struct trace_array *power_trace;
static int __read_mostly trace_power_enabled;

static void probe_power_start(struct power_trace *it, unsigned int type,
			      unsigned int level)
{
	if (!trace_power_enabled)
		return;

	memset(it, 0, sizeof(struct power_trace));
	it->state = level;
	it->type = type;
	it->stamp = ktime_get();
}


static void probe_power_end(struct power_trace *it)
{
	struct ring_buffer_event *event;
	struct trace_power *entry;
	struct trace_array_cpu *data;
	struct trace_array *tr = power_trace;

	if (!trace_power_enabled)
		return;

	preempt_disable();
	it->end = ktime_get();
	data = tr->data[smp_processor_id()];

	event = trace_buffer_lock_reserve(tr, TRACE_POWER,
					  sizeof(*entry), 0, 0);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->state_data = *it;
	trace_buffer_unlock_commit(tr, event, 0, 0);
 out:
	preempt_enable();
}

static void probe_power_mark(struct power_trace *it, unsigned int type,
			     unsigned int level)
{
	struct ring_buffer_event *event;
	struct trace_power *entry;
	struct trace_array_cpu *data;
	struct trace_array *tr = power_trace;

	if (!trace_power_enabled)
		return;

	memset(it, 0, sizeof(struct power_trace));
	it->state = level;
	it->type = type;
	it->stamp = ktime_get();
	preempt_disable();
	it->end = it->stamp;
	data = tr->data[smp_processor_id()];

	event = trace_buffer_lock_reserve(tr, TRACE_POWER,
					  sizeof(*entry), 0, 0);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->state_data = *it;
	trace_buffer_unlock_commit(tr, event, 0, 0);
 out:
	preempt_enable();
}

static int tracing_power_register(void)
{
	int ret;

	ret = register_trace_power_start(probe_power_start);
	if (ret) {
		pr_info("power trace: Couldn't activate tracepoint"
			" probe to trace_power_start\n");
		return ret;
	}
	ret = register_trace_power_end(probe_power_end);
	if (ret) {
		pr_info("power trace: Couldn't activate tracepoint"
			" probe to trace_power_end\n");
		goto fail_start;
	}
	ret = register_trace_power_mark(probe_power_mark);
	if (ret) {
		pr_info("power trace: Couldn't activate tracepoint"
			" probe to trace_power_mark\n");
		goto fail_end;
	}
	return ret;
fail_end:
	unregister_trace_power_end(probe_power_end);
fail_start:
	unregister_trace_power_start(probe_power_start);
	return ret;
}

static void start_power_trace(struct trace_array *tr)
{
@@ -31,6 +129,14 @@ static void stop_power_trace(struct trace_array *tr)
	trace_power_enabled = 0;
}

static void power_trace_reset(struct trace_array *tr)
{
	trace_power_enabled = 0;
	unregister_trace_power_start(probe_power_start);
	unregister_trace_power_end(probe_power_end);
	unregister_trace_power_mark(probe_power_mark);
}


static int power_trace_init(struct trace_array *tr)
{
@@ -38,6 +144,7 @@ static int power_trace_init(struct trace_array *tr)
	power_trace = tr;

	trace_power_enabled = 1;
	tracing_power_register();

	for_each_cpu(cpu, cpu_possible_mask)
		tracing_reset(tr, cpu);
@@ -85,7 +192,7 @@ static struct tracer power_tracer __read_mostly =
	.init		= power_trace_init,
	.start		= start_power_trace,
	.stop		= stop_power_trace,
	.reset		= stop_power_trace,
	.reset		= power_trace_reset,
	.print_line	= power_print_line,
};

@@ -94,86 +201,3 @@ static int init_power_trace(void)
	return register_tracer(&power_tracer);
}
device_initcall(init_power_trace);

void trace_power_start(struct power_trace *it, unsigned int type,
		       unsigned int level)
{
	if (!trace_power_enabled)
		return;

	memset(it, 0, sizeof(struct power_trace));
	it->state = level;
	it->type = type;
	it->stamp = ktime_get();
}
EXPORT_SYMBOL_GPL(trace_power_start);


void trace_power_end(struct power_trace *it)
{
	struct ring_buffer_event *event;
	struct trace_power *entry;
	struct trace_array_cpu *data;
	unsigned long irq_flags;
	struct trace_array *tr = power_trace;

	if (!trace_power_enabled)
		return;

	preempt_disable();
	it->end = ktime_get();
	data = tr->data[smp_processor_id()];

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);
	entry->ent.type = TRACE_POWER;
	entry->state_data = *it;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

	trace_wake_up();

 out:
	preempt_enable();
}
EXPORT_SYMBOL_GPL(trace_power_end);

void trace_power_mark(struct power_trace *it, unsigned int type,
		      unsigned int level)
{
	struct ring_buffer_event *event;
	struct trace_power *entry;
	struct trace_array_cpu *data;
	unsigned long irq_flags;
	struct trace_array *tr = power_trace;

	if (!trace_power_enabled)
		return;

	memset(it, 0, sizeof(struct power_trace));
	it->state = level;
	it->type = type;
	it->stamp = ktime_get();
	preempt_disable();
	it->end = it->stamp;
	data = tr->data[smp_processor_id()];

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);
	entry->ent.type = TRACE_POWER;
	entry->state_data = *it;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

	trace_wake_up();

 out:
	preempt_enable();
}
EXPORT_SYMBOL_GPL(trace_power_mark);
kernel/trace/trace_printk.c (new file, 270 lines)
@@ -0,0 +1,270 @@
/*
 * trace binary printk
 *
 * Copyright (C) 2008 Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 */
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/marker.h>
#include <linux/mutex.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

#ifdef CONFIG_MODULES

/*
 * A module's trace_printk() format strings are saved automatically in
 * struct trace_bprintk_fmt entries, which are queued on
 * trace_bprintk_fmt_list.
 */
static LIST_HEAD(trace_bprintk_fmt_list);

/* serialize accesses to trace_bprintk_fmt_list */
static DEFINE_MUTEX(btrace_mutex);

struct trace_bprintk_fmt {
	struct list_head list;
	char fmt[0];
};

static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
{
	struct trace_bprintk_fmt *pos;
	list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
		if (!strcmp(pos->fmt, fmt))
			return pos;
	}
	return NULL;
}

static
void hold_module_trace_bprintk_format(const char **start, const char **end)
{
	const char **iter;

	mutex_lock(&btrace_mutex);
	for (iter = start; iter < end; iter++) {
		struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
		if (tb_fmt) {
			*iter = tb_fmt->fmt;
			continue;
		}

		tb_fmt = kmalloc(offsetof(struct trace_bprintk_fmt, fmt)
				+ strlen(*iter) + 1, GFP_KERNEL);
		if (tb_fmt) {
			list_add_tail(&tb_fmt->list, &trace_bprintk_fmt_list);
			strcpy(tb_fmt->fmt, *iter);
			*iter = tb_fmt->fmt;
		} else
			*iter = NULL;
	}
	mutex_unlock(&btrace_mutex);
}

static int module_trace_bprintk_format_notify(struct notifier_block *self,
					      unsigned long val, void *data)
{
	struct module *mod = data;
	if (mod->num_trace_bprintk_fmt) {
		const char **start = mod->trace_bprintk_fmt_start;
		const char **end = start + mod->num_trace_bprintk_fmt;

		if (val == MODULE_STATE_COMING)
			hold_module_trace_bprintk_format(start, end);
	}
	return 0;
}

#else /* !CONFIG_MODULES */
__init static int
module_trace_bprintk_format_notify(struct notifier_block *self,
				   unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */


__initdata_or_module static
struct notifier_block module_trace_bprintk_format_nb = {
	.notifier_call = module_trace_bprintk_format_notify,
};

int __trace_bprintk(unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (unlikely(!fmt))
		return 0;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_vbprintk(ip, fmt, ap);
	va_end(ap);
	return ret;
}
EXPORT_SYMBOL_GPL(__trace_bprintk);

int __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap)
{
	if (unlikely(!fmt))
		return 0;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	return trace_vbprintk(ip, fmt, ap);
}
EXPORT_SYMBOL_GPL(__ftrace_vbprintk);

int __trace_printk(unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_vprintk(ip, fmt, ap);
	va_end(ap);
	return ret;
}
EXPORT_SYMBOL_GPL(__trace_printk);

int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
{
	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	return trace_vprintk(ip, fmt, ap);
}
EXPORT_SYMBOL_GPL(__ftrace_vprintk);

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	const char **fmt = m->private;
	const char **next = fmt;

	(*pos)++;

	if ((unsigned long)fmt >= (unsigned long)__stop___trace_bprintk_fmt)
		return NULL;

	next = fmt;
	m->private = ++next;

	return fmt;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	return t_next(m, NULL, pos);
}

static int t_show(struct seq_file *m, void *v)
{
	const char **fmt = v;
	const char *str = *fmt;
	int i;

	seq_printf(m, "0x%lx : \"", (unsigned long)fmt);

	/*
	 * Tabs and new lines need to be converted.
	 */
	for (i = 0; str[i]; i++) {
		switch (str[i]) {
		case '\n':
			seq_puts(m, "\\n");
			break;
		case '\t':
			seq_puts(m, "\\t");
			break;
		case '\\':
			seq_puts(m, "\\");
			break;
		case '"':
			seq_puts(m, "\\\"");
			break;
		default:
			seq_putc(m, str[i]);
		}
	}
	seq_puts(m, "\"\n");

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static const struct seq_operations show_format_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static int
ftrace_formats_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = seq_open(file, &show_format_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = __start___trace_bprintk_fmt;
	}
	return ret;
}

static const struct file_operations ftrace_formats_fops = {
	.open = ftrace_formats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static __init int init_trace_printk_function_export(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("printk_formats", 0444, d_tracer,
				    NULL, &ftrace_formats_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'printk_formats' entry\n");

	return 0;
}

fs_initcall(init_trace_printk_function_export);

static __init int init_trace_printk(void)
{
	return register_module_notifier(&module_trace_bprintk_format_nb);
}

early_initcall(init_trace_printk);
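
The exported __trace_printk()/__trace_bprintk() entry points above are normally reached through the trace_printk() macro from linux/kernel.h rather than called directly. As a rough usage sketch (the surrounding driver function is hypothetical):

#include <linux/kernel.h>	/* trace_printk() */

/* Hypothetical caller: logs into the ftrace ring buffer, not dmesg. */
static void my_driver_poll(int irq, unsigned long status)
{
	trace_printk("irq %d status %#lx\n", irq, status);
}

The recorded format strings are then visible in debugfs under tracing/printk_formats, the file created by init_trace_printk_function_export() above.
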
@@ -18,6 +18,7 @@ static struct trace_array *ctx_trace;
static int __read_mostly tracer_enabled;
static int sched_ref;
static DEFINE_MUTEX(sched_register_mutex);
static int sched_stopped;

static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
@@ -28,7 +29,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
	int cpu;
	int pc;

	if (!sched_ref)
	if (!sched_ref || sched_stopped)
		return;

	tracing_record_cmdline(prev);
@@ -43,7 +44,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
	data = ctx_trace->data[cpu];

	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);
		tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);

	local_irq_restore(flags);
}
@@ -66,7 +67,7 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
	data = ctx_trace->data[cpu];

	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
		tracing_sched_wakeup_trace(ctx_trace, wakee, current,
					   flags, pc);

	local_irq_restore(flags);
@@ -93,7 +94,7 @@ static int tracing_sched_register(void)
	ret = register_trace_sched_switch(probe_sched_switch);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_schedule\n");
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

@@ -185,12 +186,6 @@ void tracing_sched_switch_assign_trace(struct trace_array *tr)
	ctx_trace = tr;
}

static void start_sched_trace(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
	tracing_start_sched_switch_record();
}

static void stop_sched_trace(struct trace_array *tr)
{
	tracing_stop_sched_switch_record();
@@ -199,7 +194,8 @@ static void stop_sched_trace(struct trace_array *tr)
static int sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;
	start_sched_trace(tr);
	tracing_reset_online_cpus(tr);
	tracing_start_sched_switch_record();
	return 0;
}

@@ -211,13 +207,12 @@ static void sched_switch_trace_reset(struct trace_array *tr)

static void sched_switch_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
	tracing_start_sched_switch();
	sched_stopped = 0;
}

static void sched_switch_trace_stop(struct trace_array *tr)
{
	tracing_stop_sched_switch();
	sched_stopped = 1;
}

static struct tracer sched_switch_trace __read_mostly =
@@ -227,6 +222,7 @@ static struct tracer sched_switch_trace __read_mostly =
	.reset		= sched_switch_trace_reset,
	.start		= sched_switch_trace_start,
	.stop		= sched_switch_trace_stop,
	.wait_pipe	= poll_wait_pipe,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sched_switch,
#endif

@@ -25,12 +25,15 @@ static int __read_mostly tracer_enabled;
static struct task_struct *wakeup_task;
static int wakeup_cpu;
static unsigned wakeup_prio = -1;
static int wakeup_rt;

static raw_spinlock_t wakeup_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static void __wakeup_reset(struct trace_array *tr);

static int save_lat_flag;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * irqsoff uses its own tracer function to keep the overhead down:
@@ -71,7 +74,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
	if (task_cpu(wakeup_task) != cpu)
		goto unlock;

	trace_function(tr, data, ip, parent_ip, flags, pc);
	trace_function(tr, ip, parent_ip, flags, pc);

 unlock:
	__raw_spin_unlock(&wakeup_lock);
@@ -151,7 +154,8 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
	trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	/*
	 * usecs conversion is slow so we try to delay the conversion
@@ -182,13 +186,10 @@ out:

static void __wakeup_reset(struct trace_array *tr)
{
	struct trace_array_cpu *data;
	int cpu;

	for_each_possible_cpu(cpu) {
		data = tr->data[cpu];
	for_each_possible_cpu(cpu)
		tracing_reset(tr, cpu);
	}

	wakeup_cpu = -1;
	wakeup_prio = -1;
@@ -213,6 +214,7 @@ static void wakeup_reset(struct trace_array *tr)
static void
probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
@@ -224,7 +226,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	if (likely(!rt_task(p)) ||
	if ((wakeup_rt && !rt_task(p)) ||
			p->prio >= wakeup_prio ||
			p->prio >= current->prio)
		return;
@@ -252,9 +254,10 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)

	local_save_flags(flags);

	wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
	trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu],
		       CALLER_ADDR1, CALLER_ADDR2, flags, pc);
	data = wakeup_trace->data[wakeup_cpu];
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
	trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

 out_locked:
	__raw_spin_unlock(&wakeup_lock);
@@ -262,12 +265,6 @@ out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}

/*
 * save_tracer_enabled is used to save the state of the tracer_enabled
 * variable when we disable it when we open a trace output file.
 */
static int save_tracer_enabled;

static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;
@@ -289,7 +286,7 @@ static void start_wakeup_tracer(struct trace_array *tr)
	ret = register_trace_sched_switch(probe_wakeup_sched_switch);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_schedule\n");
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

@@ -306,13 +303,10 @@ static void start_wakeup_tracer(struct trace_array *tr)

	register_ftrace_function(&trace_ops);

	if (tracing_is_enabled()) {
	if (tracing_is_enabled())
		tracer_enabled = 1;
		save_tracer_enabled = 1;
	} else {
	else
		tracer_enabled = 0;
		save_tracer_enabled = 0;
	}

	return;
fail_deprobe_wake_new:
@@ -324,54 +318,54 @@ fail_deprobe:
static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	save_tracer_enabled = 0;
	unregister_ftrace_function(&trace_ops);
	unregister_trace_sched_switch(probe_wakeup_sched_switch);
	unregister_trace_sched_wakeup_new(probe_wakeup);
	unregister_trace_sched_wakeup(probe_wakeup);
}

static int wakeup_tracer_init(struct trace_array *tr)
static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
	trace_flags |= TRACE_ITER_LATENCY_FMT;

	tracing_max_latency = 0;
	wakeup_trace = tr;
	start_wakeup_tracer(tr);
	return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	if (!save_lat_flag)
		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
	save_tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
	save_tracer_enabled = 0;
}

static void wakeup_tracer_open(struct trace_iterator *iter)
{
	/* stop the trace while dumping */
	tracer_enabled = 0;
}

static void wakeup_tracer_close(struct trace_iterator *iter)
{
	/* forget about any processes we were recording */
	if (save_tracer_enabled) {
		wakeup_reset(iter->tr);
		tracer_enabled = 1;
	}
}

static struct tracer wakeup_tracer __read_mostly =
@@ -381,8 +375,20 @@ static struct tracer wakeup_tracer __read_mostly =
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.open		= wakeup_tracer_open,
	.close		= wakeup_tracer_close,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.wait_pipe	= poll_wait_pipe,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
@@ -397,6 +403,10 @@ __init static int init_wakeup_tracer(void)
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	return 0;
}
device_initcall(init_wakeup_tracer);

@@ -1,5 +1,6 @@
/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>

@@ -9,11 +10,12 @@ static inline int trace_valid_entry(struct trace_entry *entry)
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_CONT:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
@@ -99,9 +101,6 @@ static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)

#ifdef CONFIG_DYNAMIC_FTRACE

#define __STR(x) #x
#define STR(x) __STR(x)

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
@@ -125,17 +124,17 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add charachters to the
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accomodate them.
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = trace->init(tr);
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
@@ -209,7 +208,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = trace->init(tr);
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
@@ -247,6 +246,90 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static void __ftrace_dump(bool disable_tracing);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false);
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as for the function tracer, from which this
 * selftest has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
@@ -256,7 +339,7 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
@@ -268,6 +351,14 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
@@ -310,7 +401,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
	}

	/* start the tracing */
	ret = trace->init(tr);
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
@@ -322,6 +413,14 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
@@ -364,10 +463,10 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
	}

	/* start the tracing */
	ret = trace->init(tr);
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
		goto out_no_start;
	}

	/* reset the max latency */
@@ -381,31 +480,35 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret) {
		tracing_start();
	if (ret)
		goto out;
	}

	ret = trace_test_buffer(&max_tr, &count);
	if (ret) {
		tracing_start();
	if (ret)
		goto out;
	}

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		tracing_start();
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
@@ -413,6 +516,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
@@ -428,9 +532,10 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
		goto out;
	}

 out:
	trace->reset(tr);
out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
@@ -496,7 +601,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = trace->init(tr);
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
@@ -557,7 +662,7 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
@@ -589,34 +694,7 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return 0;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
@@ -631,6 +709,43 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */

@@ -245,16 +245,31 @@ static int trace_lookup_stack(struct seq_file *m, long i)
#endif
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth   Size      Location"
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----   ----      --------\n",
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

kernel/trace/trace_stat.c (new file, 326 lines)
@@ -0,0 +1,326 @@
|
||||
/*
|
||||
* Infrastructure for statistic tracing (histogram output).
|
||||
*
|
||||
* Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
|
||||
*
|
||||
* Based on the code from trace_branch.c which is
|
||||
* Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
|
||||
*
|
||||
*/
|
||||
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include "trace_stat.h"
|
||||
#include "trace.h"
|
||||
|
||||
|
||||
/* List of stat entries from a tracer */
|
||||
struct trace_stat_list {
|
||||
struct list_head list;
|
||||
void *stat;
|
||||
};
|
||||
|
||||
/* A stat session is the stats output in one file */
|
||||
struct tracer_stat_session {
|
||||
struct list_head session_list;
|
||||
struct tracer_stat *ts;
|
||||
struct list_head stat_list;
|
||||
struct mutex stat_mutex;
|
||||
struct dentry *file;
|
||||
};
|
||||
|
||||
/* All of the sessions currently in use. Each stat file embed one session */
|
||||
static LIST_HEAD(all_stat_sessions);
|
||||
static DEFINE_MUTEX(all_stat_sessions_mutex);
|
||||
|
||||
/* The root directory for all stat files */
|
||||
static struct dentry *stat_dir;
|
||||
|
||||
|
||||
static void reset_stat_session(struct tracer_stat_session *session)
|
||||
{
|
||||
struct trace_stat_list *node, *next;
|
||||
|
||||
list_for_each_entry_safe(node, next, &session->stat_list, list)
|
||||
kfree(node);
|
||||
|
||||
INIT_LIST_HEAD(&session->stat_list);
|
||||
}
|
||||
|
||||
static void destroy_session(struct tracer_stat_session *session)
|
||||
{
|
||||
debugfs_remove(session->file);
|
||||
reset_stat_session(session);
|
||||
mutex_destroy(&session->stat_mutex);
|
||||
kfree(session);
|
||||
}
|
||||
|
||||
/*
|
||||
* For tracers that don't provide a stat_cmp callback.
|
||||
* This one will force an immediate insertion on tail of
|
||||
* the list.
|
||||
*/
|
||||
static int dummy_cmp(void *p1, void *p2)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize the stat list at each trace_stat file opening.
|
||||
* All of these copies and sorting are required on all opening
|
||||
* since the stats could have changed between two file sessions.
|
||||
*/
|
||||
static int stat_seq_init(struct tracer_stat_session *session)
|
||||
{
|
||||
struct trace_stat_list *iter_entry, *new_entry;
|
||||
struct tracer_stat *ts = session->ts;
|
||||
void *stat;
|
||||
int ret = 0;
|
||||
int i;
|
||||
|
||||
mutex_lock(&session->stat_mutex);
|
||||
reset_stat_session(session);
|
||||
|
||||
if (!ts->stat_cmp)
|
||||
ts->stat_cmp = dummy_cmp;
|
||||
|
||||
stat = ts->stat_start();
|
||||
if (!stat)
|
||||
goto exit;
|
||||
|
||||
/*
|
||||
* The first entry. Actually this is the second, but the first
|
||||
* one (the stat_list head) is pointless.
|
||||
*/
|
||||
new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL);
|
||||
if (!new_entry) {
|
||||
ret = -ENOMEM;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&new_entry->list);
|
||||
|
||||
list_add(&new_entry->list, &session->stat_list);
|
||||
|
||||
new_entry->stat = stat;
|
||||
|
||||
/*
|
||||
* Iterate over the tracer stat entries and store them in a sorted
|
||||
* list.
|
||||
*/
|
||||
for (i = 1; ; i++) {
|
||||
stat = ts->stat_next(stat, i);
|
||||
|
||||
/* End of insertion */
|
||||
if (!stat)
|
||||
break;
|
||||
|
||||
new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL);
|
||||
if (!new_entry) {
|
||||
ret = -ENOMEM;
|
||||
goto exit_free_list;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&new_entry->list);
|
||||
new_entry->stat = stat;
|
||||
|
||||
list_for_each_entry_reverse(iter_entry, &session->stat_list,
|
||||
list) {
|
||||
|
||||
/* Insertion with a descendent sorting */
|
||||
if (ts->stat_cmp(iter_entry->stat,
|
||||
new_entry->stat) >= 0) {
|
||||
|
||||
list_add(&new_entry->list, &iter_entry->list);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* The current larger value */
|
||||
if (list_empty(&new_entry->list))
|
||||
list_add(&new_entry->list, &session->stat_list);
|
||||
}
|
||||
exit:
|
||||
mutex_unlock(&session->stat_mutex);
|
||||
return ret;
|
||||
|
||||
exit_free_list:
|
||||
reset_stat_session(session);
|
||||
mutex_unlock(&session->stat_mutex);
|
||||
return ret;
|
||||
}

static void *stat_seq_start(struct seq_file *s, loff_t *pos)
{
	struct tracer_stat_session *session = s->private;

	/* Prevent a concurrent tracer switch or stat_list modification */
	mutex_lock(&session->stat_mutex);

	/* If we are at the beginning of the file, print the headers */
	if (!*pos && session->ts->stat_headers)
		return SEQ_START_TOKEN;

	return seq_list_start(&session->stat_list, *pos);
}

static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos)
{
	struct tracer_stat_session *session = s->private;

	if (p == SEQ_START_TOKEN)
		return seq_list_start(&session->stat_list, *pos);

	return seq_list_next(p, &session->stat_list, pos);
}

static void stat_seq_stop(struct seq_file *s, void *p)
{
	struct tracer_stat_session *session = s->private;
	mutex_unlock(&session->stat_mutex);
}

static int stat_seq_show(struct seq_file *s, void *v)
{
	struct tracer_stat_session *session = s->private;
	struct trace_stat_list *l = list_entry(v, struct trace_stat_list, list);

	if (v == SEQ_START_TOKEN)
		return session->ts->stat_headers(s);

	return session->ts->stat_show(s, l->stat);
}

static const struct seq_operations trace_stat_seq_ops = {
	.start = stat_seq_start,
	.next = stat_seq_next,
	.stop = stat_seq_stop,
	.show = stat_seq_show
};

/* The session stat list is refilled and resorted at each stat file opening */
static int tracing_stat_open(struct inode *inode, struct file *file)
{
	int ret;

	struct tracer_stat_session *session = inode->i_private;

	ret = seq_open(file, &trace_stat_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = session;
		ret = stat_seq_init(session);
	}

	return ret;
}

/*
 * Avoid consuming memory with our now useless list.
 */
static int tracing_stat_release(struct inode *i, struct file *f)
{
	struct tracer_stat_session *session = i->i_private;

	mutex_lock(&session->stat_mutex);
	reset_stat_session(session);
	mutex_unlock(&session->stat_mutex);

	return 0;
}

static const struct file_operations tracing_stat_fops = {
	.open = tracing_stat_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = tracing_stat_release
};

static int tracing_stat_init(void)
{
	struct dentry *d_tracing;

	d_tracing = tracing_init_dentry();

	stat_dir = debugfs_create_dir("trace_stat", d_tracing);
	if (!stat_dir)
		pr_warning("Could not create debugfs "
			   "'trace_stat' entry\n");
	return 0;
}

static int init_stat_file(struct tracer_stat_session *session)
{
	if (!stat_dir && tracing_stat_init())
		return -ENODEV;

	session->file = debugfs_create_file(session->ts->name, 0644,
					    stat_dir,
					    session, &tracing_stat_fops);
	if (!session->file)
		return -ENOMEM;
	return 0;
}

int register_stat_tracer(struct tracer_stat *trace)
{
	struct tracer_stat_session *session, *node, *tmp;
	int ret;

	if (!trace)
		return -EINVAL;

	if (!trace->stat_start || !trace->stat_next || !trace->stat_show)
		return -EINVAL;

	/* Already registered? */
	mutex_lock(&all_stat_sessions_mutex);
	list_for_each_entry_safe(node, tmp, &all_stat_sessions, session_list) {
		if (node->ts == trace) {
			mutex_unlock(&all_stat_sessions_mutex);
			return -EINVAL;
		}
	}
	mutex_unlock(&all_stat_sessions_mutex);

	/* Init the session */
	session = kmalloc(sizeof(struct tracer_stat_session), GFP_KERNEL);
	if (!session)
		return -ENOMEM;

	session->ts = trace;
	INIT_LIST_HEAD(&session->session_list);
	INIT_LIST_HEAD(&session->stat_list);
	mutex_init(&session->stat_mutex);
	session->file = NULL;

	ret = init_stat_file(session);
	if (ret) {
		destroy_session(session);
		return ret;
	}

	/* Register */
	mutex_lock(&all_stat_sessions_mutex);
	list_add_tail(&session->session_list, &all_stat_sessions);
	mutex_unlock(&all_stat_sessions_mutex);

	return 0;
}

void unregister_stat_tracer(struct tracer_stat *trace)
{
	struct tracer_stat_session *node, *tmp;

	mutex_lock(&all_stat_sessions_mutex);
	list_for_each_entry_safe(node, tmp, &all_stat_sessions, session_list) {
		if (node->ts == trace) {
			list_del(&node->session_list);
			destroy_session(node);
			break;
		}
	}
	mutex_unlock(&all_stat_sessions_mutex);
}
31
kernel/trace/trace_stat.h
Normal file
@@ -0,0 +1,31 @@
#ifndef __TRACE_STAT_H
#define __TRACE_STAT_H

#include <linux/seq_file.h>

/*
 * If you want to provide a stat file (one-shot statistics), fill
 * an iterator with stat_start/stat_next and a stat_show callback.
 * The other callbacks are optional.
 */
struct tracer_stat {
	/* The name of your stat file */
	const char		*name;
	/* Iteration over statistic entries */
	void			*(*stat_start)(void);
	void			*(*stat_next)(void *prev, int idx);
	/* Compare two entries for stats sorting */
	int			(*stat_cmp)(void *p1, void *p2);
	/* Print a stat entry */
	int			(*stat_show)(struct seq_file *s, void *p);
	/* Print the headers of your stat entries */
	int			(*stat_headers)(struct seq_file *s);
};

/*
 * Create or destroy a stat file
 */
extern int register_stat_tracer(struct tracer_stat *trace);
extern void unregister_stat_tracer(struct tracer_stat *trace);

#endif /* __TRACE_STAT_H */
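To illustrate the interface, a minimal user (hypothetical my_* names, not part of this patch) would fill the mandatory iterator callbacks and register the structure; stat_cmp and stat_headers may stay NULL:

/*
 * Hypothetical sketch: my_stat_start/my_stat_next/my_stat_show are
 * assumed to be provided by the tracer. Registration should create
 * a "my_stats" file under <debugfs>/tracing/trace_stat/.
 */
static struct tracer_stat my_stats __read_mostly = {
	.name		= "my_stats",
	.stat_start	= my_stat_start,
	.stat_next	= my_stat_next,
	.stat_show	= my_stat_show,
};

static int __init my_stats_init(void)
{
	return register_stat_tracer(&my_stats);
}
fs_initcall(my_stats_init);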
250
kernel/trace/trace_syscalls.c
Normal file
@@ -0,0 +1,250 @@
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

/* Keep a counter of the syscall tracing users */
static int refcount;

/* Prevent races on thread flag toggling */
static DEFINE_MUTEX(syscall_trace_lock);

/* Option to display the parameter types */
enum {
	TRACE_SYSCALLS_OPT_TYPES = 0x1,
};

static struct tracer_opt syscalls_opts[] = {
	{ TRACER_OPT(syscall_arg_type, TRACE_SYSCALLS_OPT_TYPES) },
	{ }
};

static struct tracer_flags syscalls_flags = {
	.val = 0, /* By default: no parameter types */
	.opts = syscalls_opts
};

enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, ret, syscall;

	trace_assign_type(trace, ent);

	syscall = trace->nr;

	entry = syscall_nr_to_meta(syscall);
	if (!entry)
		goto end;

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (syscalls_flags.val & TRACE_SYSCALLS_OPT_TYPES) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s ", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? ")" : ",");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

end:
	trace_seq_printf(s, "\n");
	return TRACE_TYPE_HANDLED;
}
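For illustration (constructed from the format strings above, not captured output), with the syscall_arg_type option set a sys_read entry would be rendered roughly as:

	sys_read(unsigned int fd: 3, char * buf: 7fffc9a0, size_t count: 400)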

enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace_assign_type(trace, ent);

	syscall = trace->nr;

	entry = syscall_nr_to_meta(syscall);
	if (!entry) {
		trace_seq_printf(s, "\n");
		return TRACE_TYPE_HANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			       trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

void start_ftrace_syscalls(void)
{
	unsigned long flags;
	struct task_struct *g, *t;

	mutex_lock(&syscall_trace_lock);

	/* Don't set the flag on the tasks twice */
	if (++refcount != 1)
		goto unlock;

	arch_init_ftrace_syscalls();
	read_lock_irqsave(&tasklist_lock, flags);

	do_each_thread(g, t) {
		set_tsk_thread_flag(t, TIF_SYSCALL_FTRACE);
	} while_each_thread(g, t);

	read_unlock_irqrestore(&tasklist_lock, flags);

unlock:
	mutex_unlock(&syscall_trace_lock);
}

void stop_ftrace_syscalls(void)
{
	unsigned long flags;
	struct task_struct *g, *t;

	mutex_lock(&syscall_trace_lock);

	/* There may still be other users */
	if (--refcount)
		goto unlock;

	read_lock_irqsave(&tasklist_lock, flags);

	do_each_thread(g, t) {
		clear_tsk_thread_flag(t, TIF_SYSCALL_FTRACE);
	} while_each_thread(g, t);

	read_unlock_irqrestore(&tasklist_lock, flags);

unlock:
	mutex_unlock(&syscall_trace_lock);
}

void ftrace_syscall_enter(struct pt_regs *regs)
{
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	int size;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	event = trace_current_buffer_lock_reserve(TRACE_SYSCALL_ENTER, size,
						  0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	trace_current_buffer_unlock_commit(event, 0, 0);
	trace_wake_up();
}

void ftrace_syscall_exit(struct pt_regs *regs)
{
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	event = trace_current_buffer_lock_reserve(TRACE_SYSCALL_EXIT,
						  sizeof(*entry), 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	trace_current_buffer_unlock_commit(event, 0, 0);
	trace_wake_up();
}

static int init_syscall_tracer(struct trace_array *tr)
{
	start_ftrace_syscalls();

	return 0;
}

static void reset_syscall_tracer(struct trace_array *tr)
{
	stop_ftrace_syscalls();
	tracing_reset_online_cpus(tr);
}

static struct trace_event syscall_enter_event = {
	.type		= TRACE_SYSCALL_ENTER,
	.trace		= print_syscall_enter,
};

static struct trace_event syscall_exit_event = {
	.type		= TRACE_SYSCALL_EXIT,
	.trace		= print_syscall_exit,
};

static struct tracer syscall_tracer __read_mostly = {
	.name		= "syscall",
	.init		= init_syscall_tracer,
	.reset		= reset_syscall_tracer,
	.flags		= &syscalls_flags,
};

__init int register_ftrace_syscalls(void)
{
	int ret;

	ret = register_ftrace_event(&syscall_enter_event);
	if (!ret) {
		printk(KERN_WARNING "event %d failed to register\n",
		       syscall_enter_event.type);
		WARN_ON_ONCE(1);
	}

	ret = register_ftrace_event(&syscall_exit_event);
	if (!ret) {
		printk(KERN_WARNING "event %d failed to register\n",
		       syscall_exit_event.type);
		WARN_ON_ONCE(1);
	}

	return register_tracer(&syscall_tracer);
}
device_initcall(register_ftrace_syscalls);
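Once this initcall has run, the tracer should be selectable like any other ftrace plugin on architectures that provide HAVE_FTRACE_SYSCALLS, e.g. with 'echo syscall > /sys/kernel/debug/tracing/current_tracer'; the enter/exit lines then appear in the 'trace' file.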
@@ -88,7 +88,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
	}
}

const static struct stacktrace_ops backtrace_ops = {
static const struct stacktrace_ops backtrace_ops = {
	.warning		= backtrace_warning,
	.warning_symbol		= backtrace_warning_symbol,
	.stack			= backtrace_stack,
@@ -226,15 +226,6 @@ static void stop_stack_timers(void)
		stop_stack_timer(cpu);
}

static void start_stack_trace(struct trace_array *tr)
{
	mutex_lock(&sample_timer_lock);
	tracing_reset_online_cpus(tr);
	start_stack_timers();
	tracer_enabled = 1;
	mutex_unlock(&sample_timer_lock);
}

static void stop_stack_trace(struct trace_array *tr)
{
	mutex_lock(&sample_timer_lock);
@@ -247,12 +238,18 @@ static int stack_trace_init(struct trace_array *tr)
{
	sysprof_trace = tr;

	start_stack_trace(tr);
	tracing_start_cmdline_record();

	mutex_lock(&sample_timer_lock);
	start_stack_timers();
	tracer_enabled = 1;
	mutex_unlock(&sample_timer_lock);
	return 0;
}

static void stack_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	stop_stack_trace(tr);
}

@@ -317,7 +314,7 @@ sysprof_sample_write(struct file *filp, const char __user *ubuf,
	return cnt;
}

static struct file_operations sysprof_sample_fops = {
static const struct file_operations sysprof_sample_fops = {
	.read		= sysprof_sample_read,
	.write		= sysprof_sample_write,
};
@@ -330,5 +327,5 @@ void init_tracer_sysprof_debugfs(struct dentry *d_tracer)
			d_tracer, NULL, &sysprof_sample_fops);
	if (entry)
		return;
	pr_warning("Could not create debugfs 'dyn_ftrace_total_info' entry\n");
	pr_warning("Could not create debugfs 'sysprof_sample_period' entry\n");
}
288
kernel/trace/trace_workqueue.c
Normal file
@@ -0,0 +1,288 @@
/*
 * Workqueue statistical tracer.
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 */


#include <trace/workqueue.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include "trace_stat.h"
#include "trace.h"


/* A cpu workqueue thread */
struct cpu_workqueue_stats {
	struct list_head	list;
	/* Useful to know if we should print the cpu headers */
	bool			first_entry;
	int			cpu;
	pid_t			pid;
	/* Can be inserted from interrupt or user context, needs to be atomic */
	atomic_t		inserted;
	/*
	 * Doesn't need to be atomic: works are serialized in a single
	 * workqueue thread on a single CPU.
	 */
	unsigned int		executed;
};

/* List of workqueue threads on one cpu */
struct workqueue_global_stats {
	struct list_head	list;
	spinlock_t		lock;
};

/* No global lock needed: this is allocated before the workqueues, and
 * never freed.
 */
static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);
#define workqueue_cpu_stat(cpu)	(&per_cpu(all_workqueue_stat, cpu))

/* Insertion of a work */
static void
probe_workqueue_insertion(struct task_struct *wq_thread,
			  struct work_struct *work)
{
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node, *next;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
				 list) {
		if (node->pid == wq_thread->pid) {
			atomic_inc(&node->inserted);
			goto found;
		}
	}
	pr_debug("trace_workqueue: entry not found\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

/* Execution of a work */
static void
probe_workqueue_execution(struct task_struct *wq_thread,
			  struct work_struct *work)
{
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node, *next;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
				 list) {
		if (node->pid == wq_thread->pid) {
			node->executed++;
			goto found;
		}
	}
	pr_debug("trace_workqueue: entry not found\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

/* Creation of a cpu workqueue thread */
static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
{
	struct cpu_workqueue_stats *cws;
	unsigned long flags;

	WARN_ON(cpu < 0);

	/* Workqueues are sometimes created in atomic context */
	cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC);
	if (!cws) {
		pr_warning("trace_workqueue: not enough memory\n");
		return;
	}
	INIT_LIST_HEAD(&cws->list);
	cws->cpu = cpu;

	cws->pid = wq_thread->pid;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	if (list_empty(&workqueue_cpu_stat(cpu)->list))
		cws->first_entry = true;
	list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list);
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

/* Destruction of a cpu workqueue thread */
static void probe_workqueue_destruction(struct task_struct *wq_thread)
{
	/* A workqueue thread executes on only one cpu */
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node, *next;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
				 list) {
		if (node->pid == wq_thread->pid) {
			list_del(&node->list);
			kfree(node);
			goto found;
		}
	}

	pr_debug("trace_workqueue: could not find workqueue to destroy\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

}

static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
{
	unsigned long flags;
	struct cpu_workqueue_stats *ret = NULL;


	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);

	if (!list_empty(&workqueue_cpu_stat(cpu)->list))
		ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
				 struct cpu_workqueue_stats, list);

	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	return ret;
}

static void *workqueue_stat_start(void)
{
	int cpu;
	void *ret = NULL;

	for_each_possible_cpu(cpu) {
		ret = workqueue_stat_start_cpu(cpu);
		if (ret)
			return ret;
	}
	return NULL;
}

static void *workqueue_stat_next(void *prev, int idx)
{
	struct cpu_workqueue_stats *prev_cws = prev;
	int cpu = prev_cws->cpu;
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
		spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
		do {
			cpu = cpumask_next(cpu, cpu_possible_mask);
			if (cpu >= nr_cpu_ids)
				return NULL;
		} while (!(ret = workqueue_stat_start_cpu(cpu)));
		return ret;
	}
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	return list_entry(prev_cws->list.next, struct cpu_workqueue_stats,
			  list);
}

static int workqueue_stat_show(struct seq_file *s, void *p)
{
	struct cpu_workqueue_stats *cws = p;
	unsigned long flags;
	int cpu = cws->cpu;
	struct pid *pid;
	struct task_struct *tsk;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
		seq_printf(s, "\n");
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	pid = find_get_pid(cws->pid);
	if (pid) {
		tsk = get_pid_task(pid, PIDTYPE_PID);
		if (tsk) {
			seq_printf(s, "%3d %6d     %6u       %s\n", cws->cpu,
				   atomic_read(&cws->inserted), cws->executed,
				   tsk->comm);
			put_task_struct(tsk);
		}
		put_pid(pid);
	}

	return 0;
}

static int workqueue_stat_headers(struct seq_file *s)
{
	seq_printf(s, "# CPU  INSERTED  EXECUTED   NAME\n");
	seq_printf(s, "# |      |         |          |\n");
	return 0;
}

struct tracer_stat workqueue_stats __read_mostly = {
	.name = "workqueues",
	.stat_start = workqueue_stat_start,
	.stat_next = workqueue_stat_next,
	.stat_show = workqueue_stat_show,
	.stat_headers = workqueue_stat_headers
};
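Given the stat framework above, this registers as /sys/kernel/debug/tracing/trace_stat/workqueues (see stat_workqueue_init() below); with the headers and format strings above, the file should read roughly like (illustrative values):

# CPU  INSERTED  EXECUTED   NAME
# |      |         |          |

  0   4291       4291       events/0
  1   3516       3516       events/1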


int __init stat_workqueue_init(void)
{
	if (register_stat_tracer(&workqueue_stats)) {
		pr_warning("Unable to register workqueue stat tracer\n");
		return 1;
	}

	return 0;
}
fs_initcall(stat_workqueue_init);

/*
 * Workqueues are created very early, just after pre-smp initcalls.
 * So we must register our tracepoints at this stage.
 */
int __init trace_workqueue_early_init(void)
{
	int ret, cpu;

	ret = register_trace_workqueue_insertion(probe_workqueue_insertion);
	if (ret)
		goto out;

	ret = register_trace_workqueue_execution(probe_workqueue_execution);
	if (ret)
		goto no_insertion;

	ret = register_trace_workqueue_creation(probe_workqueue_creation);
	if (ret)
		goto no_execution;

	ret = register_trace_workqueue_destruction(probe_workqueue_destruction);
	if (ret)
		goto no_creation;

	for_each_possible_cpu(cpu) {
		spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
		INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
	}

	return 0;

no_creation:
	unregister_trace_workqueue_creation(probe_workqueue_creation);
no_execution:
	unregister_trace_workqueue_execution(probe_workqueue_execution);
no_insertion:
	unregister_trace_workqueue_insertion(probe_workqueue_insertion);
out:
	pr_warning("trace_workqueue: unable to trace workqueues\n");

	return 1;
}
early_initcall(trace_workqueue_early_init);
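A note on ordering: early_initcall() runs with the pre-smp initcalls, well before the fs_initcall() used for stat_workqueue_init() above, so the probes are expected to be armed before the first workqueue threads (and the stat file) come into existence.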