Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6

Rusty Russell
2008-12-30 08:02:35 +10:30
2974 changed files with 145857 additions and 82592 deletions


@@ -3,18 +3,34 @@
# select HAVE_FUNCTION_TRACER:
#
config USER_STACKTRACE_SUPPORT
bool
config NOP_TRACER
bool
config HAVE_FUNCTION_TRACER
bool
config HAVE_FUNCTION_GRAPH_TRACER
bool
config HAVE_FUNCTION_TRACE_MCOUNT_TEST
bool
help
This gets selected when the arch tests the function_trace_stop
variable at the mcount call site. Otherwise, this variable
is tested by the called function.
config HAVE_DYNAMIC_FTRACE
bool
config HAVE_FTRACE_MCOUNT_RECORD
bool
config HAVE_HW_BRANCH_TRACER
bool
config TRACER_MAX_TRACE
bool
@@ -47,6 +63,20 @@ config FUNCTION_TRACER
(the bootup default), then the overhead of the instructions is very
small and not measurable even in micro-benchmarks.
config FUNCTION_GRAPH_TRACER
bool "Kernel Function Graph Tracer"
depends on HAVE_FUNCTION_GRAPH_TRACER
depends on FUNCTION_TRACER
default y
help
Enable the kernel to trace a function at both its return
and its entry.
Its first purpose is to trace the duration of functions and
draw a call graph for each thread, with some information such
as the return value.
This is done by pushing the current return address onto a
stack of calls in the current task structure.
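To picture the mechanism this help text describes, here is a minimal user-space sketch (names and sizes are illustrative, not the kernel's implementation): entry pushes the function and a timestamp onto a per-task stack, and return pops the frame to yield the duration.

#include <stdio.h>

#define MAX_DEPTH 64

struct ret_frame {
	unsigned long func;             /* address of the traced function */
	unsigned long long calltime;    /* timestamp taken at entry */
};

struct task_ret_stack {
	int depth;                      /* -1 when empty */
	struct ret_frame stack[MAX_DEPTH];
};

/* On entry: remember which function was called and when. */
static int graph_entry(struct task_ret_stack *t, unsigned long func,
		       unsigned long long now)
{
	if (t->depth + 1 >= MAX_DEPTH)
		return -1;              /* too deep: drop the event */
	t->depth++;
	t->stack[t->depth].func = func;
	t->stack[t->depth].calltime = now;
	return 0;
}

/* On return: pop the frame and report how long the call took. */
static unsigned long long graph_return(struct task_ret_stack *t,
				       unsigned long long now)
{
	return now - t->stack[t->depth--].calltime;
}

int main(void)
{
	struct task_ret_stack t = { .depth = -1 };

	graph_entry(&t, 0x1000, 10);            /* enter at t=10 */
	printf("%llu\n", graph_return(&t, 35)); /* prints 25 */
	return 0;
}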
config IRQSOFF_TRACER
bool "Interrupts-off Latency Tracer"
default n
@@ -138,6 +168,70 @@ config BOOT_TRACER
selected, because the self-tests are an initcall as well and that
would invalidate the boot trace. )
config TRACE_BRANCH_PROFILING
bool "Trace likely/unlikely profiler"
depends on DEBUG_KERNEL
select TRACING
help
This tracer profiles all the likely and unlikely macros
in the kernel. It will display the results in:
/debugfs/tracing/profile_annotated_branch
Note: this will add a significant overhead, only turn this
on if you need to profile the system's use of these macros.
Say N if unsure.
config PROFILE_ALL_BRANCHES
bool "Profile all if conditionals"
depends on TRACE_BRANCH_PROFILING
help
This tracer profiles all branch conditions. Every if ()
executed in the kernel is recorded, whether it hit or missed.
The results will be displayed in:
/debugfs/tracing/profile_branch
This configuration, when enabled, will impose a significant overhead
on the system. It should only be enabled when the system
is to be analyzed.
Say N if unsure.
config TRACING_BRANCHES
bool
help
Selected by tracers that will trace the likely and unlikely
conditions. This prevents the tracers themselves from being
profiled. Profiling the tracing infrastructure can only happen
when its own likely and unlikely uses are not being traced.
config BRANCH_TRACER
bool "Trace likely/unlikely instances"
depends on TRACE_BRANCH_PROFILING
select TRACING_BRANCHES
help
This traces the events of likely and unlikely condition
calls in the kernel. The difference between this and the
"Trace likely/unlikely profiler" is that this is not a
histogram of the callers, but actually places the calling
events into a running trace buffer to see when and where the
events happened, as well as their results.
Say N if unsure.
config POWER_TRACER
bool "Trace power consumption behavior"
depends on DEBUG_KERNEL
depends on X86
select TRACING
help
This tracer helps developers to analyze and optimize the kernel's
power management decisions, specifically the C-state and P-state
behavior.
config STACK_TRACER
bool "Trace max stack"
depends on HAVE_FUNCTION_TRACER
@@ -150,13 +244,26 @@ config STACK_TRACER
This tracer works by hooking into every function call that the
kernel executes, and keeping a maximum stack depth value and
stack-trace saved. Because this logic has to execute in every
kernel function, all the time, this option can slow down the
kernel measurably and is generally intended for kernel
developers only.
stack-trace saved. If this is configured with DYNAMIC_FTRACE
then it will not have any overhead while the stack tracer
is disabled.
To enable the stack tracer on bootup, pass in 'stacktrace'
on the kernel command line.
The stack tracer can also be enabled or disabled via the
sysctl kernel.stack_tracer_enabled
Say N if unsure.
config HW_BRANCH_TRACER
depends on HAVE_HW_BRANCH_TRACER
bool "Trace hw branches"
select TRACING
help
This tracer records all branches on the system in a circular
buffer, giving access to the last N branches for each cpu.
config DYNAMIC_FTRACE
bool "enable/disable ftrace tracepoints dynamically"
depends on FUNCTION_TRACER


@@ -10,6 +10,11 @@ CFLAGS_trace_selftest_dynamic.o = -pg
obj-y += trace_selftest_dynamic.o
endif
# If unlikely tracing is enabled, do not trace these files
ifdef CONFIG_TRACING_BRANCHES
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
endif
obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
@@ -24,5 +29,9 @@ obj-$(CONFIG_NOP_TRACER) += trace_nop.o
obj-$(CONFIG_STACK_TRACER) += trace_stack.o
obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o
obj-$(CONFIG_POWER_TRACER) += trace_power.o
libftrace-y := ftrace.o

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -8,6 +8,7 @@
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/ftrace.h>
#include <trace/boot.h>
enum trace_type {
__TRACE_FIRST_TYPE = 0,
@@ -21,7 +22,14 @@ enum trace_type {
TRACE_SPECIAL,
TRACE_MMIO_RW,
TRACE_MMIO_MAP,
TRACE_BOOT,
TRACE_BRANCH,
TRACE_BOOT_CALL,
TRACE_BOOT_RET,
TRACE_GRAPH_RET,
TRACE_GRAPH_ENT,
TRACE_USER_STACK,
TRACE_HW_BRANCHES,
TRACE_POWER,
__TRACE_LAST_TYPE
};
@@ -38,6 +46,7 @@ struct trace_entry {
unsigned char flags;
unsigned char preempt_count;
int pid;
int tgid;
};
/*
@@ -48,6 +57,18 @@ struct ftrace_entry {
unsigned long ip;
unsigned long parent_ip;
};
/* Function call entry */
struct ftrace_graph_ent_entry {
struct trace_entry ent;
struct ftrace_graph_ent graph_ent;
};
/* Function return entry */
struct ftrace_graph_ret_entry {
struct trace_entry ent;
struct ftrace_graph_ret ret;
};
extern struct tracer boot_tracer;
/*
@@ -85,12 +106,18 @@ struct stack_entry {
unsigned long caller[FTRACE_STACK_ENTRIES];
};
struct userstack_entry {
struct trace_entry ent;
unsigned long caller[FTRACE_STACK_ENTRIES];
};
/*
* ftrace_printk entry:
*/
struct print_entry {
struct trace_entry ent;
unsigned long ip;
int depth;
char buf[];
};
@@ -112,9 +139,35 @@ struct trace_mmiotrace_map {
struct mmiotrace_map map;
};
struct trace_boot {
struct trace_boot_call {
struct trace_entry ent;
struct boot_trace initcall;
struct boot_trace_call boot_call;
};
struct trace_boot_ret {
struct trace_entry ent;
struct boot_trace_ret boot_ret;
};
#define TRACE_FUNC_SIZE 30
#define TRACE_FILE_SIZE 20
struct trace_branch {
struct trace_entry ent;
unsigned line;
char func[TRACE_FUNC_SIZE+1];
char file[TRACE_FILE_SIZE+1];
char correct;
};
struct hw_branch_entry {
struct trace_entry ent;
u64 from;
u64 to;
};
struct trace_power {
struct trace_entry ent;
struct power_trace state_data;
};
/*
@@ -172,7 +225,6 @@ struct trace_iterator;
struct trace_array {
struct ring_buffer *buffer;
unsigned long entries;
long ctrl;
int cpu;
cycle_t time_start;
struct task_struct *waiter;
@@ -212,13 +264,22 @@ extern void __ftrace_bad_type(void);
IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \
IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \
IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
IF_ASSIGN(var, ent, struct special_entry, 0); \
IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
TRACE_MMIO_RW); \
IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \
TRACE_MMIO_MAP); \
IF_ASSIGN(var, ent, struct trace_boot, TRACE_BOOT); \
IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \
TRACE_GRAPH_ENT); \
IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
TRACE_GRAPH_RET); \
IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \
__ftrace_bad_type(); \
} while (0)
@@ -229,29 +290,56 @@ enum print_line_t {
TRACE_TYPE_UNHANDLED = 2 /* Relay to other output functions */
};
/*
* An option specific to a tracer. It is a boolean value.
* The bit is the bit index that sets its value in the
* flags field of struct tracer_flags.
*/
struct tracer_opt {
const char *name; /* Will appear on the trace_options file */
u32 bit; /* Mask assigned in val field in tracer_flags */
};
/*
* The set of specific options for a tracer. Your tracer
* has to set the initial value of the flags val.
*/
struct tracer_flags {
u32 val;
struct tracer_opt *opts;
};
/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b) .name = #s, .bit = b
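As a usage illustration of TRACER_OPT (the same pattern the function-graph and nop tracers follow later in this diff; the names here are hypothetical):

/* Illustrative only: option bits for a hypothetical tracer. */
#define MY_OPT_VERBOSE 0x1
#define MY_OPT_RAW     0x2

static struct tracer_opt my_tracer_opts[] = {
	{ TRACER_OPT(my-verbose, MY_OPT_VERBOSE) },
	{ TRACER_OPT(my-raw, MY_OPT_RAW) },
	{ } /* the empty entry terminates the list */
};

static struct tracer_flags my_tracer_flags = {
	.val  = MY_OPT_VERBOSE, /* initial value, set by the tracer */
	.opts = my_tracer_opts,
};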
/*
* A specific tracer, represented by methods that operate on a trace array:
*/
struct tracer {
const char *name;
void (*init)(struct trace_array *tr);
/* Your tracer should raise a warning if init fails */
int (*init)(struct trace_array *tr);
void (*reset)(struct trace_array *tr);
void (*start)(struct trace_array *tr);
void (*stop)(struct trace_array *tr);
void (*open)(struct trace_iterator *iter);
void (*pipe_open)(struct trace_iterator *iter);
void (*close)(struct trace_iterator *iter);
void (*start)(struct trace_iterator *iter);
void (*stop)(struct trace_iterator *iter);
ssize_t (*read)(struct trace_iterator *iter,
struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos);
void (*ctrl_update)(struct trace_array *tr);
#ifdef CONFIG_FTRACE_STARTUP_TEST
int (*selftest)(struct tracer *trace,
struct trace_array *tr);
#endif
void (*print_header)(struct seq_file *m);
enum print_line_t (*print_line)(struct trace_iterator *iter);
/* If you handled the flag setting, return 0 */
int (*set_flag)(u32 old_flags, u32 bit, int set);
struct tracer *next;
int print_max;
struct tracer_flags *flags;
};
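Putting the interface together, a minimal tracer registration would look like the sketch below (hypothetical names; a real tracer fills in only the hooks it needs, as the tracers later in this diff do):

/* Sketch of the smallest tracer built on the interface above. */
static int example_trace_init(struct trace_array *tr)
{
	return 0; /* non-zero would signal that init failed */
}

static void example_trace_reset(struct trace_array *tr)
{
}

static struct tracer example_tracer __read_mostly = {
	.name  = "example",
	.init  = example_trace_init,
	.reset = example_trace_reset,
};

static __init int init_example_tracer(void)
{
	return register_tracer(&example_tracer);
}
device_initcall(init_example_tracer);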
struct trace_seq {
@@ -279,10 +367,14 @@ struct trace_iterator {
unsigned long iter_flags;
loff_t pos;
long idx;
cpumask_t started;
};
int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
void tracing_reset_online_cpus(struct trace_array *tr);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
@@ -321,8 +413,15 @@ void trace_function(struct trace_array *tr,
unsigned long parent_ip,
unsigned long flags, int pc);
void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void trace_hw_branch(struct trace_array *tr, u64 from, u64 to);
void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);
@@ -358,6 +457,7 @@ struct tracer_switch_ops {
struct tracer_switch_ops *next;
};
char *trace_find_cmdline(int pid);
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -383,19 +483,79 @@ extern int trace_selftest_startup_sched_switch(struct tracer *trace,
struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */
extern void *head_page(struct trace_array_cpu *data);
extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
extern void trace_seq_print_cont(struct trace_seq *s,
struct trace_iterator *iter);
extern int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
unsigned long sym_flags);
extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
size_t cnt);
extern long ns2usecs(cycle_t nsec);
extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args);
extern unsigned long trace_flags;
/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern enum print_line_t print_graph_function(struct trace_iterator *iter);
#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS 32
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
static inline int ftrace_graph_addr(unsigned long addr)
{
int i;
if (!ftrace_graph_count || test_tsk_trace_graph(current))
return 1;
for (i = 0; i < ftrace_graph_count; i++) {
if (addr == ftrace_graph_funcs[i])
return 1;
}
return 0;
}
#else
static inline int ftrace_trace_addr(unsigned long addr)
{
return 1;
}
static inline int ftrace_graph_addr(unsigned long addr)
{
return 1;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
extern struct pid *ftrace_pid_trace;
static inline int ftrace_trace_task(struct task_struct *task)
{
if (!ftrace_pid_trace)
return 1;
return test_tsk_trace_trace(task);
}
/*
* trace_iterator_flags is an enumeration that defines bit
* positions into trace_flags that control the output.
@@ -415,8 +575,93 @@ enum trace_iterator_flags {
TRACE_ITER_STACKTRACE = 0x100,
TRACE_ITER_SCHED_TREE = 0x200,
TRACE_ITER_PRINTK = 0x400,
TRACE_ITER_PREEMPTONLY = 0x800,
TRACE_ITER_BRANCH = 0x1000,
TRACE_ITER_ANNOTATE = 0x2000,
TRACE_ITER_USERSTACKTRACE = 0x4000,
TRACE_ITER_SYM_USEROBJ = 0x8000,
TRACE_ITER_PRINTK_MSGONLY = 0x10000
};
/*
* TRACE_ITER_SYM_MASK masks the options in trace_flags that
* control the output of kernel symbols.
*/
#define TRACE_ITER_SYM_MASK \
(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
extern struct tracer nop_trace;
/**
* ftrace_preempt_disable - disable preemption scheduler safe
*
* When tracing can happen inside the scheduler, there exist
* cases where the tracing might happen before the need_resched
* flag is checked. If this happens and the tracer calls
* preempt_enable (after a disable), a schedule might take place
* causing an infinite recursion.
*
* To prevent this, we read the need_resched flag before
* disabling preemption. When we want to enable preemption we
* check the flag, if it is set, then we call preempt_enable_no_resched.
* Otherwise, we call preempt_enable.
*
* The rationale for doing the above is that if need_resched is set
* and we have yet to reschedule, we are either in an atomic location
* (where we do not need to check for scheduling) or we are inside
* the scheduler and do not want to resched.
*/
static inline int ftrace_preempt_disable(void)
{
int resched;
resched = need_resched();
preempt_disable_notrace();
return resched;
}
/**
* ftrace_preempt_enable - enable preemption scheduler safe
* @resched: the return value from ftrace_preempt_disable
*
* This is a scheduler safe way to enable preemption and not miss
* any preemption checks. The disable saved the state of preemption.
* If resched is set, then we were either inside an atomic or
* are inside the scheduler (we would have already scheduled
* otherwise). In this case, we do not want to call normal
* preempt_enable, but preempt_enable_no_resched instead.
*/
static inline void ftrace_preempt_enable(int resched)
{
if (resched)
preempt_enable_no_resched_notrace();
else
preempt_enable_notrace();
}
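A typical caller brackets its tracing work with the two helpers above, along these lines (sketch; the callback name is illustrative):

/* Sketch: scheduler-safe preemption handling in a tracing callback. */
static void example_trace_callback(void)
{
	int resched;

	resched = ftrace_preempt_disable();
	/* ... record the event; safe even inside the scheduler ... */
	ftrace_preempt_enable(resched);
}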
#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
if (trace_flags & TRACE_ITER_BRANCH)
return enable_branch_tracing(tr);
return 0;
}
static inline void trace_branch_disable(void)
{
/* due to races, always disable */
disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */
#endif /* _LINUX_KERNEL_TRACE_H */


@@ -13,101 +13,132 @@
#include "trace.h"
static struct trace_array *boot_trace;
static int trace_boot_enabled;
static bool pre_initcalls_finished;
/* Should be started after do_pre_smp_initcalls() in init/main.c */
/* Tells the boot tracer that the pre_smp_initcalls are finished.
* So we are ready.
* It doesn't enable sched events tracing, however.
* You have to call enable_boot_trace to do so.
*/
void start_boot_trace(void)
{
trace_boot_enabled = 1;
pre_initcalls_finished = true;
}
void stop_boot_trace(void)
void enable_boot_trace(void)
{
trace_boot_enabled = 0;
if (pre_initcalls_finished)
tracing_start_sched_switch_record();
}
void reset_boot_trace(struct trace_array *tr)
void disable_boot_trace(void)
{
stop_boot_trace();
if (pre_initcalls_finished)
tracing_stop_sched_switch_record();
}
static void boot_trace_init(struct trace_array *tr)
static int boot_trace_init(struct trace_array *tr)
{
int cpu;
boot_trace = tr;
trace_boot_enabled = 0;
for_each_cpu_mask(cpu, cpu_possible_map)
tracing_reset(tr, cpu);
tracing_sched_switch_assign_trace(tr);
return 0;
}
static void boot_trace_ctrl_update(struct trace_array *tr)
static enum print_line_t
initcall_call_print_line(struct trace_iterator *iter)
{
if (tr->ctrl)
start_boot_trace();
struct trace_entry *entry = iter->ent;
struct trace_seq *s = &iter->seq;
struct trace_boot_call *field;
struct boot_trace_call *call;
u64 ts;
unsigned long nsec_rem;
int ret;
trace_assign_type(field, entry);
call = &field->boot_call;
ts = iter->ts;
nsec_rem = do_div(ts, 1000000000);
ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n",
(unsigned long)ts, nsec_rem, call->func, call->caller);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
else
stop_boot_trace();
return TRACE_TYPE_HANDLED;
}
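The do_div() idiom above, from <asm/div64.h>, divides the 64-bit value in place and returns the remainder; that is how the nanosecond timestamp becomes a seconds/nanoseconds pair. A worked instance:

u64 ts = 5123456789ULL;                 /* 5.123456789 seconds, in ns */
unsigned long nsec_rem = do_div(ts, 1000000000);
/* afterwards: ts == 5 (seconds), nsec_rem == 123456789 (ns) */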
static enum print_line_t
initcall_ret_print_line(struct trace_iterator *iter)
{
struct trace_entry *entry = iter->ent;
struct trace_seq *s = &iter->seq;
struct trace_boot_ret *field;
struct boot_trace_ret *init_ret;
u64 ts;
unsigned long nsec_rem;
int ret;
trace_assign_type(field, entry);
init_ret = &field->boot_ret;
ts = iter->ts;
nsec_rem = do_div(ts, 1000000000);
ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s "
"returned %d after %llu msecs\n",
(unsigned long) ts,
nsec_rem,
init_ret->func, init_ret->result, init_ret->duration);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
else
return TRACE_TYPE_HANDLED;
}
static enum print_line_t initcall_print_line(struct trace_iterator *iter)
{
int ret;
struct trace_entry *entry = iter->ent;
struct trace_boot *field = (struct trace_boot *)entry;
struct boot_trace *it = &field->initcall;
struct trace_seq *s = &iter->seq;
struct timespec calltime = ktime_to_timespec(it->calltime);
struct timespec rettime = ktime_to_timespec(it->rettime);
if (entry->type == TRACE_BOOT) {
ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n",
calltime.tv_sec,
calltime.tv_nsec,
it->func, it->caller);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s "
"returned %d after %lld msecs\n",
rettime.tv_sec,
rettime.tv_nsec,
it->func, it->result, it->duration);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
return TRACE_TYPE_HANDLED;
switch (entry->type) {
case TRACE_BOOT_CALL:
return initcall_call_print_line(iter);
case TRACE_BOOT_RET:
return initcall_ret_print_line(iter);
default:
return TRACE_TYPE_UNHANDLED;
}
return TRACE_TYPE_UNHANDLED;
}
struct tracer boot_tracer __read_mostly =
{
.name = "initcall",
.init = boot_trace_init,
.reset = reset_boot_trace,
.ctrl_update = boot_trace_ctrl_update,
.reset = tracing_reset_online_cpus,
.print_line = initcall_print_line,
};
void trace_boot(struct boot_trace *it, initcall_t fn)
void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
{
struct ring_buffer_event *event;
struct trace_boot *entry;
struct trace_array_cpu *data;
struct trace_boot_call *entry;
unsigned long irq_flags;
struct trace_array *tr = boot_trace;
if (!trace_boot_enabled)
if (!pre_initcalls_finished)
return;
/* Get its name now since this function could
* disappear because it is in the .init section.
*/
sprint_symbol(it->func, (unsigned long)fn);
sprint_symbol(bt->func, (unsigned long)fn);
preempt_disable();
data = tr->data[smp_processor_id()];
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
&irq_flags);
@@ -115,8 +146,37 @@ void trace_boot(struct boot_trace *it, initcall_t fn)
goto out;
entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, 0, 0);
entry->ent.type = TRACE_BOOT;
entry->initcall = *it;
entry->ent.type = TRACE_BOOT_CALL;
entry->boot_call = *bt;
ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
trace_wake_up();
out:
preempt_enable();
}
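trace_boot_call() above and trace_boot_ret() below share the same ring-buffer protocol; in outline:

/* Outline of the reserve -> fill -> commit protocol used here. */
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq_flags);
if (!event)
	goto out;                          /* buffer full: drop the event */
entry = ring_buffer_event_data(event);     /* pointer into the buffer */
/* ... fill in entry->ent and the payload ... */
ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
trace_wake_up();                           /* poke any waiting readers */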
void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
{
struct ring_buffer_event *event;
struct trace_boot_ret *entry;
unsigned long irq_flags;
struct trace_array *tr = boot_trace;
if (!pre_initcalls_finished)
return;
sprint_symbol(bt->func, (unsigned long)fn);
preempt_disable();
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
&irq_flags);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, 0, 0);
entry->ent.type = TRACE_BOOT_RET;
entry->boot_ret = *bt;
ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
trace_wake_up();

kernel/trace/trace_branch.c (new file, 342 lines)

@@ -0,0 +1,342 @@
/*
* unlikely profiler
*
* Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
*/
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/hash.h>
#include <linux/fs.h>
#include <asm/local.h>
#include "trace.h"
#ifdef CONFIG_BRANCH_TRACER
static int branch_tracing_enabled __read_mostly;
static DEFINE_MUTEX(branch_tracing_mutex);
static struct trace_array *branch_tracer;
static void
probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
struct trace_array *tr = branch_tracer;
struct ring_buffer_event *event;
struct trace_branch *entry;
unsigned long flags, irq_flags;
int cpu, pc;
const char *p;
/*
* I would love to save just the ftrace_likely_data pointer, but
* this code can also be used by modules. Ugly things can happen
* if the module is unloaded, and then we go and read the
* pointer. This is slower, but much safer.
*/
if (unlikely(!tr))
return;
local_irq_save(flags);
cpu = raw_smp_processor_id();
if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
goto out;
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
&irq_flags);
if (!event)
goto out;
pc = preempt_count();
entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, flags, pc);
entry->ent.type = TRACE_BRANCH;
/* Strip off the path, only save the file */
p = f->file + strlen(f->file);
while (p >= f->file && *p != '/')
p--;
p++;
strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
strncpy(entry->file, p, TRACE_FILE_SIZE);
entry->func[TRACE_FUNC_SIZE] = 0;
entry->file[TRACE_FILE_SIZE] = 0;
entry->line = f->line;
entry->correct = val == expect;
ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
out:
atomic_dec(&tr->data[cpu]->disabled);
local_irq_restore(flags);
}
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
if (!branch_tracing_enabled)
return;
probe_likely_condition(f, val, expect);
}
int enable_branch_tracing(struct trace_array *tr)
{
int ret = 0;
mutex_lock(&branch_tracing_mutex);
branch_tracer = tr;
/*
* Must be seen before enabling. The reader is a condition
* where we do not need a matching rmb()
*/
smp_wmb();
branch_tracing_enabled++;
mutex_unlock(&branch_tracing_mutex);
return ret;
}
void disable_branch_tracing(void)
{
mutex_lock(&branch_tracing_mutex);
if (!branch_tracing_enabled)
goto out_unlock;
branch_tracing_enabled--;
out_unlock:
mutex_unlock(&branch_tracing_mutex);
}
static void start_branch_trace(struct trace_array *tr)
{
enable_branch_tracing(tr);
}
static void stop_branch_trace(struct trace_array *tr)
{
disable_branch_tracing();
}
static int branch_trace_init(struct trace_array *tr)
{
int cpu;
for_each_online_cpu(cpu)
tracing_reset(tr, cpu);
start_branch_trace(tr);
return 0;
}
static void branch_trace_reset(struct trace_array *tr)
{
stop_branch_trace(tr);
}
struct tracer branch_trace __read_mostly =
{
.name = "branch",
.init = branch_trace_init,
.reset = branch_trace_reset,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_branch,
#endif
};
__init static int init_branch_trace(void)
{
return register_tracer(&branch_trace);
}
device_initcall(init_branch_trace);
#else
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
}
#endif /* CONFIG_BRANCH_TRACER */
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
{
/*
* I would love to have a trace point here instead, but the
* trace point code is so inundated with unlikely and likely
* conditions that the recursive nightmare that exists is too
* much to try to get working. At least for now.
*/
trace_likely_condition(f, val, expect);
/* FIXME: Make this atomic! */
if (val == expect)
f->correct++;
else
f->incorrect++;
}
EXPORT_SYMBOL(ftrace_likely_update);
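ftrace_likely_update() is fed by the likely()/unlikely() annotation wrappers. Conceptually, each instrumented condition expands to something like the sketch below (an illustrative macro, not the kernel's exact definition in <linux/compiler.h>):

/* Hedged sketch: every annotated branch owns a static record that the
 * wrapper updates before passing the condition value through. */
struct branch_record {
	const char *file;
	int line;
	unsigned long correct, incorrect;
};

#define LIKELY_SKETCH(x) ({						\
	static struct branch_record __d = {				\
		.file = __FILE__, .line = __LINE__ };			\
	int __v = !!(x);						\
	if (__v == 1)		/* "true" was the prediction */		\
		__d.correct++;						\
	else								\
		__d.incorrect++;					\
	__v;								\
})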
struct ftrace_pointer {
void *start;
void *stop;
int hit;
};
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
const struct ftrace_pointer *f = m->private;
struct ftrace_branch_data *p = v;
(*pos)++;
if (v == (void *)1)
return f->start;
++p;
if ((void *)p >= (void *)f->stop)
return NULL;
return p;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
void *t = (void *)1;
loff_t l = 0;
for (; t && l < *pos; t = t_next(m, t, &l))
;
return t;
}
static void t_stop(struct seq_file *m, void *p)
{
}
static int t_show(struct seq_file *m, void *v)
{
const struct ftrace_pointer *fp = m->private;
struct ftrace_branch_data *p = v;
const char *f;
long percent;
if (v == (void *)1) {
if (fp->hit)
seq_printf(m, " miss hit %% ");
else
seq_printf(m, " correct incorrect %% ");
seq_printf(m, " Function "
" File Line\n"
" ------- --------- - "
" -------- "
" ---- ----\n");
return 0;
}
/* Only print the file, not the path */
f = p->file + strlen(p->file);
while (f >= p->file && *f != '/')
f--;
f++;
/*
* The miss is overlaid on correct, and hit on incorrect.
*/
if (p->correct) {
percent = p->incorrect * 100;
percent /= p->correct + p->incorrect;
} else
percent = p->incorrect ? 100 : -1;
seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect);
if (percent < 0)
seq_printf(m, " X ");
else
seq_printf(m, "%3ld ", percent);
seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
return 0;
}
static struct seq_operations tracing_likely_seq_ops = {
.start = t_start,
.next = t_next,
.stop = t_stop,
.show = t_show,
};
static int tracing_branch_open(struct inode *inode, struct file *file)
{
int ret;
ret = seq_open(file, &tracing_likely_seq_ops);
if (!ret) {
struct seq_file *m = file->private_data;
m->private = (void *)inode->i_private;
}
return ret;
}
static const struct file_operations tracing_branch_fops = {
.open = tracing_branch_open,
.read = seq_read,
.llseek = seq_lseek,
};
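For orientation, the seq_file core drives the four operations registered above roughly as follows:

/*
 * What the seq_file core does with the operations above, in outline:
 *
 *	p = t_start(m, &pos);
 *	while (p) {
 *		t_show(m, p);
 *		p = t_next(m, p, &pos);
 *	}
 *	t_stop(m, p);
 *
 * t_start() hands back the cookie (void *)1 first, which t_show()
 * recognizes and renders as the header row before the real
 * ftrace_branch_data records are printed.
 */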
#ifdef CONFIG_PROFILE_ALL_BRANCHES
extern unsigned long __start_branch_profile[];
extern unsigned long __stop_branch_profile[];
static const struct ftrace_pointer ftrace_branch_pos = {
.start = __start_branch_profile,
.stop = __stop_branch_profile,
.hit = 1,
};
#endif /* CONFIG_PROFILE_ALL_BRANCHES */
extern unsigned long __start_annotated_branch_profile[];
extern unsigned long __stop_annotated_branch_profile[];
static const struct ftrace_pointer ftrace_annotated_branch_pos = {
.start = __start_annotated_branch_profile,
.stop = __stop_annotated_branch_profile,
};
static __init int ftrace_branch_init(void)
{
struct dentry *d_tracer;
struct dentry *entry;
d_tracer = tracing_init_dentry();
entry = debugfs_create_file("profile_annotated_branch", 0444, d_tracer,
(void *)&ftrace_annotated_branch_pos,
&tracing_branch_fops);
if (!entry)
pr_warning("Could not create debugfs "
"'profile_annotatet_branch' entry\n");
#ifdef CONFIG_PROFILE_ALL_BRANCHES
entry = debugfs_create_file("profile_branch", 0444, d_tracer,
(void *)&ftrace_branch_pos,
&tracing_branch_fops);
if (!entry)
pr_warning("Could not create debugfs"
" 'profile_branch' entry\n");
#endif
return 0;
}
device_initcall(ftrace_branch_init);


@@ -16,20 +16,10 @@
#include "trace.h"
static void function_reset(struct trace_array *tr)
{
int cpu;
tr->time_start = ftrace_now(tr->cpu);
for_each_online_cpu(cpu)
tracing_reset(tr, cpu);
}
static void start_function_trace(struct trace_array *tr)
{
tr->cpu = get_cpu();
function_reset(tr);
tracing_reset_online_cpus(tr);
put_cpu();
tracing_start_cmdline_record();
@@ -42,24 +32,20 @@ static void stop_function_trace(struct trace_array *tr)
tracing_stop_cmdline_record();
}
static void function_trace_init(struct trace_array *tr)
static int function_trace_init(struct trace_array *tr)
{
if (tr->ctrl)
start_function_trace(tr);
start_function_trace(tr);
return 0;
}
static void function_trace_reset(struct trace_array *tr)
{
if (tr->ctrl)
stop_function_trace(tr);
stop_function_trace(tr);
}
static void function_trace_ctrl_update(struct trace_array *tr)
static void function_trace_start(struct trace_array *tr)
{
if (tr->ctrl)
start_function_trace(tr);
else
stop_function_trace(tr);
tracing_reset_online_cpus(tr);
}
static struct tracer function_trace __read_mostly =
@@ -67,7 +53,7 @@ static struct tracer function_trace __read_mostly =
.name = "function",
.init = function_trace_init,
.reset = function_trace_reset,
.ctrl_update = function_trace_ctrl_update,
.start = function_trace_start,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_function,
#endif


@@ -0,0 +1,669 @@
/*
*
* Function graph tracer.
* Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
* Mostly borrowed from function tracer which
* is Copyright (c) Steven Rostedt <srostedt@redhat.com>
*
*/
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>
#include "trace.h"
#define TRACE_GRAPH_INDENT 2
/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN 0x1
#define TRACE_GRAPH_PRINT_CPU 0x2
#define TRACE_GRAPH_PRINT_OVERHEAD 0x4
#define TRACE_GRAPH_PRINT_PROC 0x8
static struct tracer_opt trace_opts[] = {
/* Display overruns ? */
{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
/* Display CPU ? */
{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
/* Display Overhead ? */
{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
/* Display proc name/pid */
{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
{ } /* Empty entry */
};
static struct tracer_flags tracer_flags = {
/* Don't display overruns and proc by default */
.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD,
.opts = trace_opts
};
/* pid on the last trace processed */
static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
static int graph_trace_init(struct trace_array *tr)
{
int cpu, ret;
for_each_online_cpu(cpu)
tracing_reset(tr, cpu);
ret = register_ftrace_graph(&trace_graph_return,
&trace_graph_entry);
if (ret)
return ret;
tracing_start_cmdline_record();
return 0;
}
static void graph_trace_reset(struct trace_array *tr)
{
tracing_stop_cmdline_record();
unregister_ftrace_graph();
}
static inline int log10_cpu(int nb)
{
if (nb / 100)
return 3;
if (nb / 10)
return 2;
return 1;
}
static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
int i;
int ret;
int log10_this = log10_cpu(cpu);
int log10_all = log10_cpu(cpus_weight_nr(cpu_online_map));
/*
* Start with a space character - to make it stand out
* to the right a bit when trace output is pasted into
* email:
*/
ret = trace_seq_printf(s, " ");
/*
* Tricky - we space the CPU field according to the max
* number of online CPUs. On a 2-cpu system it would take
* a maximum of 1 digit - on a 128 cpu system it would
* take up to 3 digits:
*/
for (i = 0; i < log10_all - log10_this; i++) {
ret = trace_seq_printf(s, " ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
ret = trace_seq_printf(s, "%d) ", cpu);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
return TRACE_TYPE_HANDLED;
}
#define TRACE_GRAPH_PROCINFO_LENGTH 14
static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
int i;
int ret;
int len;
char comm[8];
int spaces = 0;
/* sign + log10(MAX_INT) + '\0' */
char pid_str[11];
strncpy(comm, trace_find_cmdline(pid), 7);
comm[7] = '\0';
sprintf(pid_str, "%d", pid);
/* 1 stands for the "-" character */
len = strlen(comm) + strlen(pid_str) + 1;
if (len < TRACE_GRAPH_PROCINFO_LENGTH)
spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
/* First spaces to align center */
for (i = 0; i < spaces / 2; i++) {
ret = trace_seq_printf(s, " ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
/* Last spaces to align center */
for (i = 0; i < spaces - (spaces / 2); i++) {
ret = trace_seq_printf(s, " ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
return TRACE_TYPE_HANDLED;
}
/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu)
{
pid_t prev_pid;
int ret;
if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
return TRACE_TYPE_HANDLED;
prev_pid = last_pid[cpu];
last_pid[cpu] = pid;
/*
* Context-switch trace line:
------------------------------------------
| 1) migration/0--1 => sshd-1755
------------------------------------------
*/
ret = trace_seq_printf(s,
" ------------------------------------------\n");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
ret = print_graph_cpu(s, cpu);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
ret = print_graph_proc(s, prev_pid);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
ret = trace_seq_printf(s, " => ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
ret = print_graph_proc(s, pid);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
ret = trace_seq_printf(s,
"\n ------------------------------------------\n\n");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
return TRACE_TYPE_HANDLED;
}
static bool
trace_branch_is_leaf(struct trace_iterator *iter,
struct ftrace_graph_ent_entry *curr)
{
struct ring_buffer_iter *ring_iter;
struct ring_buffer_event *event;
struct ftrace_graph_ret_entry *next;
ring_iter = iter->buffer_iter[iter->cpu];
if (!ring_iter)
return false;
event = ring_buffer_iter_peek(ring_iter, NULL);
if (!event)
return false;
next = ring_buffer_event_data(event);
if (next->ent.type != TRACE_GRAPH_RET)
return false;
if (curr->ent.pid != next->ent.pid ||
curr->graph_ent.func != next->ret.func)
return false;
return true;
}
static enum print_line_t
print_graph_irq(struct trace_seq *s, unsigned long addr,
enum trace_type type, int cpu, pid_t pid)
{
int ret;
if (addr < (unsigned long)__irqentry_text_start ||
addr >= (unsigned long)__irqentry_text_end)
return TRACE_TYPE_UNHANDLED;
if (type == TRACE_GRAPH_ENT) {
ret = trace_seq_printf(s, "==========> | ");
} else {
/* Cpu */
if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
ret = print_graph_cpu(s, cpu);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
}
/* Proc */
if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
ret = print_graph_proc(s, pid);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
ret = trace_seq_printf(s, " | ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
/* No overhead */
if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
ret = trace_seq_printf(s, " ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
ret = trace_seq_printf(s, "<========== |\n");
}
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
unsigned long nsecs_rem = do_div(duration, 1000);
/* log10(ULONG_MAX) + '\0' */
char msecs_str[21];
char nsecs_str[5];
int ret, len;
int i;
sprintf(msecs_str, "%lu", (unsigned long) duration);
/* Print msecs */
ret = trace_seq_printf(s, msecs_str);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
len = strlen(msecs_str);
/* Print nsecs (we don't want to exceed 7 digits) */
if (len < 7) {
snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
ret = trace_seq_printf(s, ".%s", nsecs_str);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
len += strlen(nsecs_str);
}
ret = trace_seq_printf(s, " us ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
/* Print remaining spaces to fit the row's width */
for (i = len; i < 7; i++) {
ret = trace_seq_printf(s, " ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
ret = trace_seq_printf(s, "| ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
return TRACE_TYPE_HANDLED;
}
/* Signal an overhead of execution time in the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
/* Duration exceeded 100 usecs */
if (duration > 100000ULL)
return trace_seq_printf(s, "! ");
/* Duration exceeded 10 usecs */
if (duration > 10000ULL)
return trace_seq_printf(s, "+ ");
return trace_seq_printf(s, " ");
}
/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
{
struct ftrace_graph_ret_entry *ret_entry;
struct ftrace_graph_ret *graph_ret;
struct ring_buffer_event *event;
struct ftrace_graph_ent *call;
unsigned long long duration;
int ret;
int i;
event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
ret_entry = ring_buffer_event_data(event);
graph_ret = &ret_entry->ret;
call = &entry->graph_ent;
duration = graph_ret->rettime - graph_ret->calltime;
/* Overhead */
if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
ret = print_graph_overhead(duration, s);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
/* Duration */
ret = print_graph_duration(duration, s);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
/* Function */
for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
ret = trace_seq_printf(s, " ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
ret = seq_print_ip_sym(s, call->func, 0);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
ret = trace_seq_printf(s, "();\n");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
struct trace_seq *s, pid_t pid, int cpu)
{
int i;
int ret;
struct ftrace_graph_ent *call = &entry->graph_ent;
/* No overhead */
if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
ret = trace_seq_printf(s, " ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
/* Interrupt */
ret = print_graph_irq(s, call->func, TRACE_GRAPH_ENT, cpu, pid);
if (ret == TRACE_TYPE_UNHANDLED) {
/* No time */
ret = trace_seq_printf(s, " | ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
} else {
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
}
/* Function */
for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
ret = trace_seq_printf(s, " ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
ret = seq_print_ip_sym(s, call->func, 0);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
ret = trace_seq_printf(s, "() {\n");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
struct trace_iterator *iter, int cpu)
{
int ret;
struct trace_entry *ent = iter->ent;
/* Pid */
if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
/* Cpu */
if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
ret = print_graph_cpu(s, cpu);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
}
/* Proc */
if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
ret = print_graph_proc(s, ent->pid);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
ret = trace_seq_printf(s, " | ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
if (trace_branch_is_leaf(iter, field))
return print_graph_entry_leaf(iter, field, s);
else
return print_graph_entry_nested(field, s, iter->ent->pid, cpu);
}
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
struct trace_entry *ent, int cpu)
{
int i;
int ret;
unsigned long long duration = trace->rettime - trace->calltime;
/* Pid */
if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
/* Cpu */
if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
ret = print_graph_cpu(s, cpu);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
}
/* Proc */
if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
ret = print_graph_proc(s, ent->pid);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
ret = trace_seq_printf(s, " | ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
/* Overhead */
if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
ret = print_graph_overhead(duration, s);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
/* Duration */
ret = print_graph_duration(duration, s);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
/* Closing brace */
for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
ret = trace_seq_printf(s, " ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
ret = trace_seq_printf(s, "}\n");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
/* Overrun */
if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
ret = trace_seq_printf(s, " (Overruns: %lu)\n",
trace->overrun);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
ret = print_graph_irq(s, trace->func, TRACE_GRAPH_RET, cpu, ent->pid);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_comment(struct print_entry *trace, struct trace_seq *s,
struct trace_entry *ent, struct trace_iterator *iter)
{
int i;
int ret;
/* Pid */
if (verif_pid(s, ent->pid, iter->cpu) == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
/* Cpu */
if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
ret = print_graph_cpu(s, iter->cpu);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
}
/* Proc */
if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
ret = print_graph_proc(s, ent->pid);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
ret = trace_seq_printf(s, " | ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
/* No overhead */
if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
ret = trace_seq_printf(s, " ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
/* No time */
ret = trace_seq_printf(s, " | ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
/* Indentation */
if (trace->depth > 0)
for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) {
ret = trace_seq_printf(s, " ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
/* The comment */
ret = trace_seq_printf(s, "/* %s", trace->buf);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
if (ent->flags & TRACE_FLAG_CONT)
trace_seq_print_cont(s, iter);
ret = trace_seq_printf(s, " */\n");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
return TRACE_TYPE_HANDLED;
}
enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
struct trace_entry *entry = iter->ent;
switch (entry->type) {
case TRACE_GRAPH_ENT: {
struct ftrace_graph_ent_entry *field;
trace_assign_type(field, entry);
return print_graph_entry(field, s, iter,
iter->cpu);
}
case TRACE_GRAPH_RET: {
struct ftrace_graph_ret_entry *field;
trace_assign_type(field, entry);
return print_graph_return(&field->ret, s, entry, iter->cpu);
}
case TRACE_PRINT: {
struct print_entry *field;
trace_assign_type(field, entry);
return print_graph_comment(field, s, entry, iter);
}
default:
return TRACE_TYPE_UNHANDLED;
}
}
static void print_graph_headers(struct seq_file *s)
{
/* 1st line */
seq_printf(s, "# ");
if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
seq_printf(s, "CPU ");
if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
seq_printf(s, "TASK/PID ");
if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD)
seq_printf(s, "OVERHEAD/");
seq_printf(s, "DURATION FUNCTION CALLS\n");
/* 2nd line */
seq_printf(s, "# ");
if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
seq_printf(s, "| ");
if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
seq_printf(s, "| | ");
if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
seq_printf(s, "| ");
seq_printf(s, "| | | | |\n");
} else
seq_printf(s, " | | | | |\n");
}
static struct tracer graph_trace __read_mostly = {
.name = "function_graph",
.init = graph_trace_init,
.reset = graph_trace_reset,
.print_line = print_graph_function,
.print_header = print_graph_headers,
.flags = &tracer_flags,
};
static __init int init_graph_trace(void)
{
return register_tracer(&graph_trace);
}
device_initcall(init_graph_trace);


@@ -0,0 +1,195 @@
/*
* h/w branch tracer for x86 based on bts
*
* Copyright (C) 2008 Markus Metzger <markus.t.metzger@gmail.com>
*
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/kallsyms.h>
#include <asm/ds.h>
#include "trace.h"
#define SIZEOF_BTS (1 << 13)
static DEFINE_PER_CPU(struct bts_tracer *, tracer);
static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);
#define this_tracer per_cpu(tracer, smp_processor_id())
#define this_buffer per_cpu(buffer, smp_processor_id())
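Both shorthands resolve to the current CPU's slot of a per-CPU variable. The same pattern in isolation (an illustration, not code from this file):

/* Illustration of the per-CPU shorthand pattern used above. */
static DEFINE_PER_CPU(unsigned long, example_events);
#define this_events per_cpu(example_events, smp_processor_id())

static void count_event(void)
{
	this_events++;	/* bumps the slot of the CPU we are running on */
}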
static void bts_trace_start_cpu(void *arg)
{
if (this_tracer)
ds_release_bts(this_tracer);
this_tracer =
ds_request_bts(/* task = */ NULL, this_buffer, SIZEOF_BTS,
/* ovfl = */ NULL, /* th = */ (size_t)-1,
BTS_KERNEL);
if (IS_ERR(this_tracer)) {
this_tracer = NULL;
return;
}
}
static void bts_trace_start(struct trace_array *tr)
{
int cpu;
tracing_reset_online_cpus(tr);
for_each_cpu_mask(cpu, cpu_possible_map)
smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
}
static void bts_trace_stop_cpu(void *arg)
{
if (this_tracer) {
ds_release_bts(this_tracer);
this_tracer = NULL;
}
}
static void bts_trace_stop(struct trace_array *tr)
{
int cpu;
for_each_cpu_mask(cpu, cpu_possible_map)
smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
}
static int bts_trace_init(struct trace_array *tr)
{
tracing_reset_online_cpus(tr);
bts_trace_start(tr);
return 0;
}
static void bts_trace_print_header(struct seq_file *m)
{
seq_puts(m,
"# CPU# FROM TO FUNCTION\n");
seq_puts(m,
"# | | | |\n");
}
static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
{
struct trace_entry *entry = iter->ent;
struct trace_seq *seq = &iter->seq;
struct hw_branch_entry *it;
trace_assign_type(it, entry);
if (entry->type == TRACE_HW_BRANCHES) {
if (trace_seq_printf(seq, "%4d ", entry->cpu) &&
trace_seq_printf(seq, "0x%016llx -> 0x%016llx ",
it->from, it->to) &&
(!it->from ||
seq_print_ip_sym(seq, it->from, /* sym_flags = */ 0)) &&
trace_seq_printf(seq, "\n"))
return TRACE_TYPE_HANDLED;
return TRACE_TYPE_PARTIAL_LINE;
}
return TRACE_TYPE_UNHANDLED;
}
void trace_hw_branch(struct trace_array *tr, u64 from, u64 to)
{
struct ring_buffer_event *event;
struct hw_branch_entry *entry;
unsigned long irq;
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq);
if (!event)
return;
entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, 0, from);
entry->ent.type = TRACE_HW_BRANCHES;
entry->ent.cpu = smp_processor_id();
entry->from = from;
entry->to = to;
ring_buffer_unlock_commit(tr->buffer, event, irq);
}
static void trace_bts_at(struct trace_array *tr,
const struct bts_trace *trace, void *at)
{
struct bts_struct bts;
int err = 0;
WARN_ON_ONCE(!trace->read);
if (!trace->read)
return;
err = trace->read(this_tracer, at, &bts);
if (err < 0)
return;
switch (bts.qualifier) {
case BTS_BRANCH:
trace_hw_branch(tr, bts.variant.lbr.from, bts.variant.lbr.to);
break;
}
}
static void trace_bts_cpu(void *arg)
{
struct trace_array *tr = (struct trace_array *) arg;
const struct bts_trace *trace;
unsigned char *at;
if (!this_tracer)
return;
ds_suspend_bts(this_tracer);
trace = ds_read_bts(this_tracer);
if (!trace)
goto out;
for (at = trace->ds.top; (void *)at < trace->ds.end;
at += trace->ds.size)
trace_bts_at(tr, trace, at);
for (at = trace->ds.begin; (void *)at < trace->ds.top;
at += trace->ds.size)
trace_bts_at(tr, trace, at);
out:
ds_resume_bts(this_tracer);
}
static void trace_bts_prepare(struct trace_iterator *iter)
{
int cpu;
for_each_cpu_mask(cpu, cpu_possible_map)
smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1);
}
struct tracer bts_tracer __read_mostly =
{
.name = "hw-branch-tracer",
.init = bts_trace_init,
.reset = bts_trace_stop,
.print_header = bts_trace_print_header,
.print_line = bts_trace_print_line,
.start = bts_trace_start,
.stop = bts_trace_stop,
.open = trace_bts_prepare
};
__init static int init_bts_trace(void)
{
return register_tracer(&bts_tracer);
}
device_initcall(init_bts_trace);


@@ -353,15 +353,28 @@ void trace_preempt_off(unsigned long a0, unsigned long a1)
}
#endif /* CONFIG_PREEMPT_TRACER */
/*
* save_tracer_enabled is used to save the state of the tracer_enabled
* variable when we disable it upon opening a trace output file.
*/
static int save_tracer_enabled;
static void start_irqsoff_tracer(struct trace_array *tr)
{
register_ftrace_function(&trace_ops);
tracer_enabled = 1;
if (tracing_is_enabled()) {
tracer_enabled = 1;
save_tracer_enabled = 1;
} else {
tracer_enabled = 0;
save_tracer_enabled = 0;
}
}
static void stop_irqsoff_tracer(struct trace_array *tr)
{
tracer_enabled = 0;
save_tracer_enabled = 0;
unregister_ftrace_function(&trace_ops);
}
@@ -370,53 +383,55 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
irqsoff_trace = tr;
/* make sure that the tracer is visible */
smp_wmb();
if (tr->ctrl)
start_irqsoff_tracer(tr);
start_irqsoff_tracer(tr);
}
static void irqsoff_tracer_reset(struct trace_array *tr)
{
if (tr->ctrl)
stop_irqsoff_tracer(tr);
stop_irqsoff_tracer(tr);
}
static void irqsoff_tracer_ctrl_update(struct trace_array *tr)
static void irqsoff_tracer_start(struct trace_array *tr)
{
if (tr->ctrl)
start_irqsoff_tracer(tr);
else
stop_irqsoff_tracer(tr);
tracer_enabled = 1;
save_tracer_enabled = 1;
}
static void irqsoff_tracer_stop(struct trace_array *tr)
{
tracer_enabled = 0;
save_tracer_enabled = 0;
}
static void irqsoff_tracer_open(struct trace_iterator *iter)
{
/* stop the trace while dumping */
if (iter->tr->ctrl)
stop_irqsoff_tracer(iter->tr);
tracer_enabled = 0;
}
static void irqsoff_tracer_close(struct trace_iterator *iter)
{
if (iter->tr->ctrl)
start_irqsoff_tracer(iter->tr);
/* restart tracing */
tracer_enabled = save_tracer_enabled;
}
#ifdef CONFIG_IRQSOFF_TRACER
static void irqsoff_tracer_init(struct trace_array *tr)
static int irqsoff_tracer_init(struct trace_array *tr)
{
trace_type = TRACER_IRQS_OFF;
__irqsoff_tracer_init(tr);
return 0;
}
static struct tracer irqsoff_tracer __read_mostly =
{
.name = "irqsoff",
.init = irqsoff_tracer_init,
.reset = irqsoff_tracer_reset,
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
.open = irqsoff_tracer_open,
.close = irqsoff_tracer_close,
.ctrl_update = irqsoff_tracer_ctrl_update,
.print_max = 1,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_irqsoff,
@@ -428,11 +443,12 @@ static struct tracer irqsoff_tracer __read_mostly =
#endif
#ifdef CONFIG_PREEMPT_TRACER
static void preemptoff_tracer_init(struct trace_array *tr)
static int preemptoff_tracer_init(struct trace_array *tr)
{
trace_type = TRACER_PREEMPT_OFF;
__irqsoff_tracer_init(tr);
return 0;
}
static struct tracer preemptoff_tracer __read_mostly =
@@ -440,9 +456,10 @@ static struct tracer preemptoff_tracer __read_mostly =
.name = "preemptoff",
.init = preemptoff_tracer_init,
.reset = irqsoff_tracer_reset,
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
.open = irqsoff_tracer_open,
.close = irqsoff_tracer_close,
.ctrl_update = irqsoff_tracer_ctrl_update,
.print_max = 1,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_preemptoff,
@@ -456,11 +473,12 @@ static struct tracer preemptoff_tracer __read_mostly =
#if defined(CONFIG_IRQSOFF_TRACER) && \
defined(CONFIG_PREEMPT_TRACER)
static void preemptirqsoff_tracer_init(struct trace_array *tr)
static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;
__irqsoff_tracer_init(tr);
return 0;
}
static struct tracer preemptirqsoff_tracer __read_mostly =
@@ -468,9 +486,10 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
.name = "preemptirqsoff",
.init = preemptirqsoff_tracer_init,
.reset = irqsoff_tracer_reset,
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
.open = irqsoff_tracer_open,
.close = irqsoff_tracer_close,
.ctrl_update = irqsoff_tracer_ctrl_update,
.print_max = 1,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_preemptirqsoff,


@@ -22,44 +22,35 @@ static unsigned long prev_overruns;
static void mmio_reset_data(struct trace_array *tr)
{
int cpu;
overrun_detected = false;
prev_overruns = 0;
tr->time_start = ftrace_now(tr->cpu);
for_each_online_cpu(cpu)
tracing_reset(tr, cpu);
tracing_reset_online_cpus(tr);
}
static void mmio_trace_init(struct trace_array *tr)
static int mmio_trace_init(struct trace_array *tr)
{
pr_debug("in %s\n", __func__);
mmio_trace_array = tr;
if (tr->ctrl) {
mmio_reset_data(tr);
enable_mmiotrace();
}
mmio_reset_data(tr);
enable_mmiotrace();
return 0;
}
static void mmio_trace_reset(struct trace_array *tr)
{
pr_debug("in %s\n", __func__);
if (tr->ctrl)
disable_mmiotrace();
disable_mmiotrace();
mmio_reset_data(tr);
mmio_trace_array = NULL;
}
static void mmio_trace_ctrl_update(struct trace_array *tr)
static void mmio_trace_start(struct trace_array *tr)
{
pr_debug("in %s\n", __func__);
if (tr->ctrl) {
mmio_reset_data(tr);
enable_mmiotrace();
} else {
disable_mmiotrace();
}
mmio_reset_data(tr);
}
static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev)
@@ -296,10 +287,10 @@ static struct tracer mmio_tracer __read_mostly =
.name = "mmiotrace",
.init = mmio_trace_init,
.reset = mmio_trace_reset,
.start = mmio_trace_start,
.pipe_open = mmio_pipe_open,
.close = mmio_close,
.read = mmio_read,
.ctrl_update = mmio_trace_ctrl_update,
.print_line = mmio_print_line,
};
@@ -371,5 +362,5 @@ void mmio_trace_mapping(struct mmiotrace_map *map)
int mmio_trace_printk(const char *fmt, va_list args)
{
return trace_vprintk(0, fmt, args);
return trace_vprintk(0, -1, fmt, args);
}


@@ -12,6 +12,27 @@
#include "trace.h"
/* Our two options */
enum {
TRACE_NOP_OPT_ACCEPT = 0x1,
TRACE_NOP_OPT_REFUSE = 0x2
};
/* Options for the tracer (see trace_options file) */
static struct tracer_opt nop_opts[] = {
/* Option that will be accepted by set_flag callback */
{ TRACER_OPT(test_nop_accept, TRACE_NOP_OPT_ACCEPT) },
/* Option that will be refused by set_flag callback */
{ TRACER_OPT(test_nop_refuse, TRACE_NOP_OPT_REFUSE) },
{ } /* Always set a last empty entry */
};
static struct tracer_flags nop_flags = {
/* You can check your flags value here when you want. */
.val = 0, /* By default: all flags disabled */
.opts = nop_opts
};
static struct trace_array *ctx_trace;
static void start_nop_trace(struct trace_array *tr)
@@ -24,7 +45,7 @@ static void stop_nop_trace(struct trace_array *tr)
/* Nothing to do! */
}
static void nop_trace_init(struct trace_array *tr)
static int nop_trace_init(struct trace_array *tr)
{
int cpu;
ctx_trace = tr;
@@ -32,33 +53,53 @@ static void nop_trace_init(struct trace_array *tr)
for_each_online_cpu(cpu)
tracing_reset(tr, cpu);
if (tr->ctrl)
start_nop_trace(tr);
start_nop_trace(tr);
return 0;
}
static void nop_trace_reset(struct trace_array *tr)
{
if (tr->ctrl)
stop_nop_trace(tr);
stop_nop_trace(tr);
}
static void nop_trace_ctrl_update(struct trace_array *tr)
/* It only serves as a signal handler and a callback to
* accept or refuse the setting of a flag.
* If you don't implement it, then the flag setting will be
* automatically accepted.
*/
static int nop_set_flag(u32 old_flags, u32 bit, int set)
{
/* When starting a new trace, reset the buffers */
if (tr->ctrl)
start_nop_trace(tr);
else
stop_nop_trace(tr);
/*
* Note that you don't need to update nop_flags.val yourself.
* The tracing API will do it automatically if you return 0
*/
if (bit == TRACE_NOP_OPT_ACCEPT) {
printk(KERN_DEBUG "nop_test_accept flag set to %d: we accept."
" Now cat trace_options to see the result\n",
set);
return 0;
}
if (bit == TRACE_NOP_OPT_REFUSE) {
printk(KERN_DEBUG "nop_test_refuse flag set to %d: we refuse."
"Now cat trace_options to see the result\n",
set);
return -EINVAL;
}
return 0;
}
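For context, a hedged sketch of what the trace_options write path does with this return value (the variable names here are assumptions, not taken from this diff):

/* roughly, in the core's option-setting path: */
ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
if (ret)
        return ret;             /* tracer refused: leave the flag untouched */

if (neg)
        tracer_flags->val &= ~opts->bit;    /* core clears the bit ... */
else
        tracer_flags->val |= opts->bit;     /* ... or sets it on success */

So returning -EINVAL from nop_set_flag() is enough to veto test_nop_refuse, while test_nop_accept toggles normally.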
struct tracer nop_trace __read_mostly =
{
.name = "nop",
.init = nop_trace_init,
.reset = nop_trace_reset,
.ctrl_update = nop_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_nop,
#endif
.flags = &nop_flags,
.set_flag = nop_set_flag
};

kernel/trace/trace_power.c (new file, 179 lines)

@@ -0,0 +1,179 @@
/*
* ring buffer based C-state tracer
*
* Arjan van de Ven <arjan@linux.intel.com>
* Copyright (C) 2008 Intel Corporation
*
* Much is borrowed from trace_boot.c which is
* Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
*
*/
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include "trace.h"
static struct trace_array *power_trace;
static int __read_mostly trace_power_enabled;
static void start_power_trace(struct trace_array *tr)
{
trace_power_enabled = 1;
}
static void stop_power_trace(struct trace_array *tr)
{
trace_power_enabled = 0;
}
static int power_trace_init(struct trace_array *tr)
{
int cpu;
power_trace = tr;
trace_power_enabled = 1;
for_each_cpu_mask(cpu, cpu_possible_map)
tracing_reset(tr, cpu);
return 0;
}
static enum print_line_t power_print_line(struct trace_iterator *iter)
{
int ret = 0;
struct trace_entry *entry = iter->ent;
struct trace_power *field;
struct power_trace *it;
struct trace_seq *s = &iter->seq;
struct timespec stamp;
struct timespec duration;
trace_assign_type(field, entry);
it = &field->state_data;
stamp = ktime_to_timespec(it->stamp);
duration = ktime_to_timespec(ktime_sub(it->end, it->stamp));
if (entry->type == TRACE_POWER) {
if (it->type == POWER_CSTATE)
ret = trace_seq_printf(s, "[%5ld.%09ld] CSTATE: Going to C%i on cpu %i for %ld.%09ld\n",
stamp.tv_sec,
stamp.tv_nsec,
it->state, iter->cpu,
duration.tv_sec,
duration.tv_nsec);
if (it->type == POWER_PSTATE)
ret = trace_seq_printf(s, "[%5ld.%09ld] PSTATE: Going to P%i on cpu %i\n",
stamp.tv_sec,
stamp.tv_nsec,
it->state, iter->cpu);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
return TRACE_TYPE_HANDLED;
}
return TRACE_TYPE_UNHANDLED;
}
static struct tracer power_tracer __read_mostly =
{
.name = "power",
.init = power_trace_init,
.start = start_power_trace,
.stop = stop_power_trace,
.reset = stop_power_trace,
.print_line = power_print_line,
};
static int init_power_trace(void)
{
return register_tracer(&power_tracer);
}
device_initcall(init_power_trace);
void trace_power_start(struct power_trace *it, unsigned int type,
unsigned int level)
{
if (!trace_power_enabled)
return;
memset(it, 0, sizeof(struct power_trace));
it->state = level;
it->type = type;
it->stamp = ktime_get();
}
EXPORT_SYMBOL_GPL(trace_power_start);
void trace_power_end(struct power_trace *it)
{
struct ring_buffer_event *event;
struct trace_power *entry;
struct trace_array_cpu *data;
unsigned long irq_flags;
struct trace_array *tr = power_trace;
if (!trace_power_enabled)
return;
preempt_disable();
it->end = ktime_get();
data = tr->data[smp_processor_id()];
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
&irq_flags);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, 0, 0);
entry->ent.type = TRACE_POWER;
entry->state_data = *it;
ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
trace_wake_up();
out:
preempt_enable();
}
EXPORT_SYMBOL_GPL(trace_power_end);
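A hedged sketch of how an idle path would pair these two hooks; the function name is illustrative, not part of this diff:

static void example_enter_cstate(int state)
{
        struct power_trace it;

        trace_power_start(&it, POWER_CSTATE, state);
        /* ... CPU sits in the C-state until the next wakeup ... */
        trace_power_end(&it);   /* stamps it->end and commits one event */
}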
void trace_power_mark(struct power_trace *it, unsigned int type,
unsigned int level)
{
struct ring_buffer_event *event;
struct trace_power *entry;
struct trace_array_cpu *data;
unsigned long irq_flags;
struct trace_array *tr = power_trace;
if (!trace_power_enabled)
return;
memset(it, 0, sizeof(struct power_trace));
it->state = level;
it->type = type;
it->stamp = ktime_get();
preempt_disable();
it->end = it->stamp;
data = tr->data[smp_processor_id()];
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
&irq_flags);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, 0, 0);
entry->ent.type = TRACE_POWER;
entry->state_data = *it;
ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
trace_wake_up();
out:
preempt_enable();
}
EXPORT_SYMBOL_GPL(trace_power_mark);
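trace_power_mark() is the one-shot variant: stamp and end are set to the same time and the event is committed immediately. An illustrative P-state caller (names assumed):

static void example_set_pstate(unsigned int target_state)
{
        struct power_trace it;

        trace_power_mark(&it, POWER_PSTATE, target_state);
}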


@@ -16,7 +16,8 @@
static struct trace_array *ctx_trace;
static int __read_mostly tracer_enabled;
static atomic_t sched_ref;
static int sched_ref;
static DEFINE_MUTEX(sched_register_mutex);
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
@@ -27,7 +28,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
int cpu;
int pc;
if (!atomic_read(&sched_ref))
if (!sched_ref)
return;
tracing_record_cmdline(prev);
@@ -48,7 +49,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
}
static void
probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
{
struct trace_array_cpu *data;
unsigned long flags;
@@ -71,16 +72,6 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
local_irq_restore(flags);
}
static void sched_switch_reset(struct trace_array *tr)
{
int cpu;
tr->time_start = ftrace_now(tr->cpu);
for_each_online_cpu(cpu)
tracing_reset(tr, cpu);
}
static int tracing_sched_register(void)
{
int ret;
@@ -123,20 +114,18 @@ static void tracing_sched_unregister(void)
static void tracing_start_sched_switch(void)
{
long ref;
ref = atomic_inc_return(&sched_ref);
if (ref == 1)
mutex_lock(&sched_register_mutex);
if (!(sched_ref++))
tracing_sched_register();
mutex_unlock(&sched_register_mutex);
}
static void tracing_stop_sched_switch(void)
{
long ref;
ref = atomic_dec_and_test(&sched_ref);
if (ref)
mutex_lock(&sched_register_mutex);
if (!(--sched_ref))
tracing_sched_unregister();
mutex_unlock(&sched_register_mutex);
}
void tracing_start_cmdline_record(void)
@@ -149,40 +138,86 @@ void tracing_stop_cmdline_record(void)
tracing_stop_sched_switch();
}
/**
* tracing_start_sched_switch_record - start tracing context switches
*
* Turns on context switch tracing for a tracer.
*/
void tracing_start_sched_switch_record(void)
{
if (unlikely(!ctx_trace)) {
WARN_ON(1);
return;
}
tracing_start_sched_switch();
mutex_lock(&sched_register_mutex);
tracer_enabled++;
mutex_unlock(&sched_register_mutex);
}
/**
* tracing_stop_sched_switch_record - stop tracing context switches
*
* Turns off context switch tracing for a tracer.
*/
void tracing_stop_sched_switch_record(void)
{
mutex_lock(&sched_register_mutex);
tracer_enabled--;
WARN_ON(tracer_enabled < 0);
mutex_unlock(&sched_register_mutex);
tracing_stop_sched_switch();
}
/**
* tracing_sched_switch_assign_trace - assign a trace array for ctx switch
* @tr: trace array pointer to assign
*
* Some tracers might want to record the context switches in their
* trace. This function lets those tracers assign the trace array
* to use.
*/
void tracing_sched_switch_assign_trace(struct trace_array *tr)
{
ctx_trace = tr;
}
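Per the kernel-doc above, these three exported calls are the recipe for a tracer that wants context-switch events in its own buffer; a minimal sketch (hypothetical caller):

/* in another tracer's init path: */
tracing_sched_switch_assign_trace(tr);   /* route events into tr */
tracing_start_sched_switch_record();     /* refcounted under sched_register_mutex */

/* ... and in its reset path: */
tracing_stop_sched_switch_record();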
static void start_sched_trace(struct trace_array *tr)
{
sched_switch_reset(tr);
tracing_start_cmdline_record();
tracer_enabled = 1;
tracing_reset_online_cpus(tr);
tracing_start_sched_switch_record();
}
static void stop_sched_trace(struct trace_array *tr)
{
tracer_enabled = 0;
tracing_stop_cmdline_record();
tracing_stop_sched_switch_record();
}
static void sched_switch_trace_init(struct trace_array *tr)
static int sched_switch_trace_init(struct trace_array *tr)
{
ctx_trace = tr;
if (tr->ctrl)
start_sched_trace(tr);
start_sched_trace(tr);
return 0;
}
static void sched_switch_trace_reset(struct trace_array *tr)
{
if (tr->ctrl)
if (sched_ref)
stop_sched_trace(tr);
}
static void sched_switch_trace_ctrl_update(struct trace_array *tr)
static void sched_switch_trace_start(struct trace_array *tr)
{
/* When starting a new trace, reset the buffers */
if (tr->ctrl)
start_sched_trace(tr);
else
stop_sched_trace(tr);
tracing_reset_online_cpus(tr);
tracing_start_sched_switch();
}
static void sched_switch_trace_stop(struct trace_array *tr)
{
tracing_stop_sched_switch();
}
static struct tracer sched_switch_trace __read_mostly =
@@ -190,7 +225,8 @@ static struct tracer sched_switch_trace __read_mostly =
.name = "sched_switch",
.init = sched_switch_trace_init,
.reset = sched_switch_trace_reset,
.ctrl_update = sched_switch_trace_ctrl_update,
.start = sched_switch_trace_start,
.stop = sched_switch_trace_stop,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_sched_switch,
#endif
@@ -198,14 +234,7 @@ static struct tracer sched_switch_trace __read_mostly =
__init static int init_sched_switch_trace(void)
{
int ret = 0;
if (atomic_read(&sched_ref))
ret = tracing_sched_register();
if (ret) {
pr_info("error registering scheduler trace\n");
return ret;
}
return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);


@@ -50,8 +50,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
return;
pc = preempt_count();
resched = need_resched();
preempt_disable_notrace();
resched = ftrace_preempt_disable();
cpu = raw_smp_processor_id();
data = tr->data[cpu];
@@ -81,15 +80,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
out:
atomic_dec(&data->disabled);
/*
* To prevent recursion from the scheduler, if the
* resched flag was set before we entered, then
* don't reschedule.
*/
if (resched)
preempt_enable_no_resched_notrace();
else
preempt_enable_notrace();
ftrace_preempt_enable(resched);
}
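ftrace_preempt_disable()/ftrace_preempt_enable() fold the open-coded recursion guard deleted here into a reusable pair. A sketch of their likely shape, inferred from the code they replace (the actual helpers live in trace.h):

static inline int ftrace_preempt_disable(void)
{
        int resched;

        resched = need_resched();
        preempt_disable_notrace();
        return resched;
}

static inline void ftrace_preempt_enable(int resched)
{
        /*
         * If NEED_RESCHED was set before we disabled preemption,
         * do not reschedule on the way out: the tracer may have
         * been called from within the scheduler itself.
         */
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}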
static struct ftrace_ops trace_ops __read_mostly =
@@ -220,7 +211,7 @@ static void wakeup_reset(struct trace_array *tr)
}
static void
probe_wakeup(struct rq *rq, struct task_struct *p)
probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
int cpu = smp_processor_id();
unsigned long flags;
@@ -271,6 +262,12 @@ out:
atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
/*
* save_tracer_enabled is used to save the state of the tracer_enabled
* variable when we disable it upon opening a trace output file.
*/
static int save_tracer_enabled;
static void start_wakeup_tracer(struct trace_array *tr)
{
int ret;
@@ -309,7 +306,13 @@ static void start_wakeup_tracer(struct trace_array *tr)
register_ftrace_function(&trace_ops);
tracer_enabled = 1;
if (tracing_is_enabled()) {
tracer_enabled = 1;
save_tracer_enabled = 1;
} else {
tracer_enabled = 0;
save_tracer_enabled = 0;
}
return;
fail_deprobe_wake_new:
@@ -321,49 +324,53 @@ fail_deprobe:
static void stop_wakeup_tracer(struct trace_array *tr)
{
tracer_enabled = 0;
save_tracer_enabled = 0;
unregister_ftrace_function(&trace_ops);
unregister_trace_sched_switch(probe_wakeup_sched_switch);
unregister_trace_sched_wakeup_new(probe_wakeup);
unregister_trace_sched_wakeup(probe_wakeup);
}
static void wakeup_tracer_init(struct trace_array *tr)
static int wakeup_tracer_init(struct trace_array *tr)
{
wakeup_trace = tr;
if (tr->ctrl)
start_wakeup_tracer(tr);
start_wakeup_tracer(tr);
return 0;
}
static void wakeup_tracer_reset(struct trace_array *tr)
{
if (tr->ctrl) {
stop_wakeup_tracer(tr);
/* make sure we put back any tasks we are tracing */
wakeup_reset(tr);
}
stop_wakeup_tracer(tr);
/* make sure we put back any tasks we are tracing */
wakeup_reset(tr);
}
static void wakeup_tracer_ctrl_update(struct trace_array *tr)
static void wakeup_tracer_start(struct trace_array *tr)
{
if (tr->ctrl)
start_wakeup_tracer(tr);
else
stop_wakeup_tracer(tr);
wakeup_reset(tr);
tracer_enabled = 1;
save_tracer_enabled = 1;
}
static void wakeup_tracer_stop(struct trace_array *tr)
{
tracer_enabled = 0;
save_tracer_enabled = 0;
}
static void wakeup_tracer_open(struct trace_iterator *iter)
{
/* stop the trace while dumping */
if (iter->tr->ctrl)
stop_wakeup_tracer(iter->tr);
tracer_enabled = 0;
}
static void wakeup_tracer_close(struct trace_iterator *iter)
{
/* forget about any processes we were recording */
if (iter->tr->ctrl)
start_wakeup_tracer(iter->tr);
if (save_tracer_enabled) {
wakeup_reset(iter->tr);
tracer_enabled = 1;
}
}
static struct tracer wakeup_tracer __read_mostly =
@@ -371,9 +378,10 @@ static struct tracer wakeup_tracer __read_mostly =
.name = "wakeup",
.init = wakeup_tracer_init,
.reset = wakeup_tracer_reset,
.start = wakeup_tracer_start,
.stop = wakeup_tracer_stop,
.open = wakeup_tracer_open,
.close = wakeup_tracer_close,
.ctrl_update = wakeup_tracer_ctrl_update,
.print_max = 1,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_wakeup,


@@ -13,6 +13,7 @@ static inline int trace_valid_entry(struct trace_entry *entry)
case TRACE_STACK:
case TRACE_PRINT:
case TRACE_SPECIAL:
case TRACE_BRANCH:
return 1;
}
return 0;
@@ -51,7 +52,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
int cpu, ret = 0;
/* Don't allow flipping of max traces now */
raw_local_irq_save(flags);
local_irq_save(flags);
__raw_spin_lock(&ftrace_max_lock);
cnt = ring_buffer_entries(tr->buffer);
@@ -62,7 +63,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
break;
}
__raw_spin_unlock(&ftrace_max_lock);
raw_local_irq_restore(flags);
local_irq_restore(flags);
if (count)
*count = cnt;
@@ -70,6 +71,11 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
return ret;
}
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -110,8 +116,11 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
ftrace_set_filter(func_name, strlen(func_name), 1);
/* enable tracing */
tr->ctrl = 1;
trace->init(tr);
ret = trace->init(tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
goto out;
}
/* Sleep for a 1/10 of a second */
msleep(100);
@@ -134,13 +143,13 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
msleep(100);
/* stop the tracing. */
tr->ctrl = 0;
trace->ctrl_update(tr);
tracing_stop();
ftrace_enabled = 0;
/* check the trace buffer */
ret = trace_test_buffer(tr, &count);
trace->reset(tr);
tracing_start();
/* we should only have one item */
if (!ret && count != 1) {
@@ -148,6 +157,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
ret = -1;
goto out;
}
out:
ftrace_enabled = save_ftrace_enabled;
tracer_enabled = save_tracer_enabled;
@@ -180,18 +190,22 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
ftrace_enabled = 1;
tracer_enabled = 1;
tr->ctrl = 1;
trace->init(tr);
ret = trace->init(tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
goto out;
}
/* Sleep for a 1/10 of a second */
msleep(100);
/* stop the tracing. */
tr->ctrl = 0;
trace->ctrl_update(tr);
tracing_stop();
ftrace_enabled = 0;
/* check the trace buffer */
ret = trace_test_buffer(tr, &count);
trace->reset(tr);
tracing_start();
if (!ret && !count) {
printk(KERN_CONT ".. no entries found ..");
@@ -223,8 +237,12 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
int ret;
/* start the tracing */
tr->ctrl = 1;
trace->init(tr);
ret = trace->init(tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
return ret;
}
/* reset the max latency */
tracing_max_latency = 0;
/* disable interrupts for a bit */
@@ -232,13 +250,13 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
udelay(100);
local_irq_enable();
/* stop the tracing. */
tr->ctrl = 0;
trace->ctrl_update(tr);
tracing_stop();
/* check both trace buffers */
ret = trace_test_buffer(tr, NULL);
if (!ret)
ret = trace_test_buffer(&max_tr, &count);
trace->reset(tr);
tracing_start();
if (!ret && !count) {
printk(KERN_CONT ".. no entries found ..");
@@ -259,9 +277,26 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
unsigned long count;
int ret;
/*
* Now that the big kernel lock is no longer preemptible,
* and this is called with the BKL held, it will always
* fail. If preemption is already disabled, simply
* pass the test. When the BKL is removed, or becomes
* preemptible again, we will once again test this,
* so keep it in.
*/
if (preempt_count()) {
printk(KERN_CONT "can not test ... force ");
return 0;
}
/* start the tracing */
tr->ctrl = 1;
trace->init(tr);
ret = trace->init(tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
return ret;
}
/* reset the max latency */
tracing_max_latency = 0;
/* disable preemption for a bit */
@@ -269,13 +304,13 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
udelay(100);
preempt_enable();
/* stop the tracing. */
tr->ctrl = 0;
trace->ctrl_update(tr);
tracing_stop();
/* check both trace buffers */
ret = trace_test_buffer(tr, NULL);
if (!ret)
ret = trace_test_buffer(&max_tr, &count);
trace->reset(tr);
tracing_start();
if (!ret && !count) {
printk(KERN_CONT ".. no entries found ..");
@@ -296,9 +331,25 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
unsigned long count;
int ret;
/*
* Now that the big kernel lock is no longer preemptible,
* and this is called with the BKL held, it will always
* fail. If preemption is already disabled, simply
* pass the test. When the BKL is removed, or becomes
* preemptible again, we will once again test this,
* so keep it in.
*/
if (preempt_count()) {
printk(KERN_CONT "can not test ... force ");
return 0;
}
/* start the tracing */
tr->ctrl = 1;
trace->init(tr);
ret = trace->init(tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
goto out;
}
/* reset the max latency */
tracing_max_latency = 0;
@@ -312,27 +363,30 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
local_irq_enable();
/* stop the tracing. */
tr->ctrl = 0;
trace->ctrl_update(tr);
tracing_stop();
/* check both trace buffers */
ret = trace_test_buffer(tr, NULL);
if (ret)
if (ret) {
tracing_start();
goto out;
}
ret = trace_test_buffer(&max_tr, &count);
if (ret)
if (ret) {
tracing_start();
goto out;
}
if (!ret && !count) {
printk(KERN_CONT ".. no entries found ..");
ret = -1;
tracing_start();
goto out;
}
/* do the test by disabling interrupts first this time */
tracing_max_latency = 0;
tr->ctrl = 1;
trace->ctrl_update(tr);
tracing_start();
preempt_disable();
local_irq_disable();
udelay(100);
@@ -341,8 +395,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
local_irq_enable();
/* stop the tracing. */
tr->ctrl = 0;
trace->ctrl_update(tr);
tracing_stop();
/* check both trace buffers */
ret = trace_test_buffer(tr, NULL);
if (ret)
@@ -358,6 +411,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
out:
trace->reset(tr);
tracing_start();
tracing_max_latency = save_max;
return ret;
@@ -423,8 +477,12 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
wait_for_completion(&isrt);
/* start the tracing */
tr->ctrl = 1;
trace->init(tr);
ret = trace->init(tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
return ret;
}
/* reset the max latency */
tracing_max_latency = 0;
@@ -448,8 +506,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
msleep(100);
/* stop the tracing. */
tr->ctrl = 0;
trace->ctrl_update(tr);
tracing_stop();
/* check both trace buffers */
ret = trace_test_buffer(tr, NULL);
if (!ret)
@@ -457,6 +514,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
trace->reset(tr);
tracing_start();
tracing_max_latency = save_max;
@@ -480,16 +538,20 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr
int ret;
/* start the tracing */
tr->ctrl = 1;
trace->init(tr);
ret = trace->init(tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
return ret;
}
/* Sleep for a 1/10 of a second */
msleep(100);
/* stop the tracing. */
tr->ctrl = 0;
trace->ctrl_update(tr);
tracing_stop();
/* check the trace buffer */
ret = trace_test_buffer(tr, &count);
trace->reset(tr);
tracing_start();
if (!ret && !count) {
printk(KERN_CONT ".. no entries found ..");
@@ -508,17 +570,48 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
int ret;
/* start the tracing */
tr->ctrl = 1;
trace->init(tr);
ret = trace->init(tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
return 0;
}
/* Sleep for a 1/10 of a second */
msleep(100);
/* stop the tracing. */
tr->ctrl = 0;
trace->ctrl_update(tr);
tracing_stop();
/* check the trace buffer */
ret = trace_test_buffer(tr, &count);
trace->reset(tr);
tracing_start();
return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */
#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
unsigned long count;
int ret;
/* start the tracing */
ret = trace->init(tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
return ret;
}
/* Sleep for a 1/10 of a second */
msleep(100);
/* stop the tracing. */
tracing_stop();
/* check the trace buffer */
ret = trace_test_buffer(tr, &count);
trace->reset(tr);
tracing_start();
return ret;
}
#endif /* CONFIG_BRANCH_TRACER */
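Every selftest converted above follows the same skeleton; distilled here for reference:

ret = trace->init(tr);                 /* init now reports failure directly */
if (ret) {
        warn_failed_init_tracer(trace, ret);
        return ret;
}
msleep(100);                           /* give the tracer time to record */
tracing_stop();                        /* freeze buffers (replaces tr->ctrl + ctrl_update) */
ret = trace_test_buffer(tr, &count);   /* count entries, checking validity */
trace->reset(tr);
tracing_start();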


@@ -10,6 +10,7 @@
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>
#include "trace.h"
@@ -31,6 +32,10 @@ static raw_spinlock_t max_stack_lock =
static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);
int stack_tracer_enabled;
static int last_stack_tracer_enabled;
static inline void check_stack(void)
{
@@ -48,7 +53,7 @@ static inline void check_stack(void)
if (!object_is_on_stack(&this_size))
return;
raw_local_irq_save(flags);
local_irq_save(flags);
__raw_spin_lock(&max_stack_lock);
/* a race could have already updated it */
@@ -78,6 +83,7 @@ static inline void check_stack(void)
* on a new max, so it is far from a fast path.
*/
while (i < max_stack_trace.nr_entries) {
int found = 0;
stack_dump_index[i] = this_size;
p = start;
@@ -86,17 +92,19 @@ static inline void check_stack(void)
if (*p == stack_dump_trace[i]) {
this_size = stack_dump_index[i++] =
(top - p) * sizeof(unsigned long);
found = 1;
/* Start the search from here */
start = p + 1;
}
}
i++;
if (!found)
i++;
}
out:
__raw_spin_unlock(&max_stack_lock);
raw_local_irq_restore(flags);
local_irq_restore(flags);
}
static void
@@ -107,8 +115,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
if (unlikely(!ftrace_enabled || stack_trace_disabled))
return;
resched = need_resched();
preempt_disable_notrace();
resched = ftrace_preempt_disable();
cpu = raw_smp_processor_id();
/* no atomic needed, we only modify this variable by this cpu */
@@ -120,10 +127,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
out:
per_cpu(trace_active, cpu)--;
/* prevent recursion in schedule */
if (resched)
preempt_enable_no_resched_notrace();
else
preempt_enable_notrace();
ftrace_preempt_enable(resched);
}
static struct ftrace_ops trace_ops __read_mostly =
@@ -166,16 +170,16 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
if (ret < 0)
return ret;
raw_local_irq_save(flags);
local_irq_save(flags);
__raw_spin_lock(&max_stack_lock);
*ptr = val;
__raw_spin_unlock(&max_stack_lock);
raw_local_irq_restore(flags);
local_irq_restore(flags);
return count;
}
static struct file_operations stack_max_size_fops = {
static const struct file_operations stack_max_size_fops = {
.open = tracing_open_generic,
.read = stack_max_size_read,
.write = stack_max_size_write,
@@ -273,7 +277,7 @@ static int t_show(struct seq_file *m, void *v)
return 0;
}
static struct seq_operations stack_trace_seq_ops = {
static const struct seq_operations stack_trace_seq_ops = {
.start = t_start,
.next = t_next,
.stop = t_stop,
@@ -289,12 +293,47 @@ static int stack_trace_open(struct inode *inode, struct file *file)
return ret;
}
static struct file_operations stack_trace_fops = {
static const struct file_operations stack_trace_fops = {
.open = stack_trace_open,
.read = seq_read,
.llseek = seq_lseek,
};
int
stack_trace_sysctl(struct ctl_table *table, int write,
struct file *file, void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int ret;
mutex_lock(&stack_sysctl_mutex);
ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
if (ret || !write ||
(last_stack_tracer_enabled == stack_tracer_enabled))
goto out;
last_stack_tracer_enabled = stack_tracer_enabled;
if (stack_tracer_enabled)
register_ftrace_function(&trace_ops);
else
unregister_ftrace_function(&trace_ops);
out:
mutex_unlock(&stack_sysctl_mutex);
return ret;
}
static __init int enable_stacktrace(char *str)
{
stack_tracer_enabled = 1;
last_stack_tracer_enabled = 1;
return 1;
}
__setup("stacktrace", enable_stacktrace);
static __init int stack_trace_init(void)
{
struct dentry *d_tracer;
@@ -312,7 +351,8 @@ static __init int stack_trace_init(void)
if (!entry)
pr_warning("Could not create debugfs 'stack_trace' entry\n");
register_ftrace_function(&trace_ops);
if (stack_tracer_enabled)
register_ftrace_function(&trace_ops);
return 0;
}


@@ -234,20 +234,10 @@ static void stop_stack_timers(void)
stop_stack_timer(cpu);
}
static void stack_reset(struct trace_array *tr)
{
int cpu;
tr->time_start = ftrace_now(tr->cpu);
for_each_online_cpu(cpu)
tracing_reset(tr, cpu);
}
static void start_stack_trace(struct trace_array *tr)
{
mutex_lock(&sample_timer_lock);
stack_reset(tr);
tracing_reset_online_cpus(tr);
start_stack_timers();
tracer_enabled = 1;
mutex_unlock(&sample_timer_lock);
@@ -261,27 +251,17 @@ static void stop_stack_trace(struct trace_array *tr)
mutex_unlock(&sample_timer_lock);
}
static void stack_trace_init(struct trace_array *tr)
static int stack_trace_init(struct trace_array *tr)
{
sysprof_trace = tr;
if (tr->ctrl)
start_stack_trace(tr);
start_stack_trace(tr);
return 0;
}
static void stack_trace_reset(struct trace_array *tr)
{
if (tr->ctrl)
stop_stack_trace(tr);
}
static void stack_trace_ctrl_update(struct trace_array *tr)
{
/* When starting a new trace, reset the buffers */
if (tr->ctrl)
start_stack_trace(tr);
else
stop_stack_trace(tr);
stop_stack_trace(tr);
}
static struct tracer stack_trace __read_mostly =
@@ -289,7 +269,6 @@ static struct tracer stack_trace __read_mostly =
.name = "sysprof",
.init = stack_trace_init,
.reset = stack_trace_reset,
.ctrl_update = stack_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_sysprof,
#endif