Merge branches 'tracing/blktrace', 'tracing/ftrace', 'tracing/function-graph-tracer' and 'tracing/power-tracer' into tracing/core
@@ -21,7 +21,7 @@ CFLAGS_REMOVE_cgroup-debug.o = -pg
CFLAGS_REMOVE_sched_clock.o = -pg
CFLAGS_REMOVE_sched.o = -pg
endif
ifdef CONFIG_FUNCTION_RET_TRACER
ifdef CONFIG_FUNCTION_GRAPH_TRACER
CFLAGS_REMOVE_extable.o = -pg # For __kernel_text_address()
CFLAGS_REMOVE_module.o = -pg # For __module_text_address()
endif
@@ -140,7 +140,7 @@ void free_task(struct task_struct *tsk)
prop_local_destroy_single(&tsk->dirties);
free_thread_info(tsk->stack);
rt_mutex_debug_task_free(tsk);
ftrace_retfunc_exit_task(tsk);
ftrace_graph_exit_task(tsk);
free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

@@ -1271,7 +1271,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
total_forks++;
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
ftrace_retfunc_init_task(p);
ftrace_graph_init_task(p);
proc_fork_connector(p);
cgroup_post_fork(p);
return p;

@@ -5901,7 +5901,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
* The idle tasks have their own, simple scheduling class:
*/
idle->sched_class = &idle_sched_class;
ftrace_retfunc_init_task(idle);
ftrace_graph_init_task(idle);
}

/*
@@ -12,7 +12,7 @@ config NOP_TRACER
config HAVE_FUNCTION_TRACER
bool

config HAVE_FUNCTION_RET_TRACER
config HAVE_FUNCTION_GRAPH_TRACER
bool

config HAVE_FUNCTION_TRACE_MCOUNT_TEST

@@ -63,15 +63,18 @@ config FUNCTION_TRACER
(the bootup default), then the overhead of the instructions is very
small and not measurable even in micro-benchmarks.

config FUNCTION_RET_TRACER
bool "Kernel Function return Tracer"
depends on HAVE_FUNCTION_RET_TRACER
config FUNCTION_GRAPH_TRACER
bool "Kernel Function Graph Tracer"
depends on HAVE_FUNCTION_GRAPH_TRACER
depends on FUNCTION_TRACER
help
Enable the kernel to trace a function at its return.
It's first purpose is to trace the duration of functions.
This is done by setting the current return address on the thread
info structure of the current task.
Enable the kernel to trace a function at both its return
and its entry.
It's first purpose is to trace the duration of functions and
draw a call graph for each thread with some informations like
the return value.
This is done by setting the current return address on the current
task structure into a stack of calls.

config IRQSOFF_TRACER
bool "Interrupts-off Latency Tracer"
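Note (not part of the original patch): the FUNCTION_GRAPH_TRACER help text above describes the mechanism the rest of this patch implements. Each task gets an array of struct ftrace_ret_stack entries (t->ret_stack, FTRACE_RETFUNC_DEPTH deep, see ftrace_graph_init_task() below); on function entry the real return address and a timestamp are pushed onto that per-task stack, and a trampoline pops them on return so the duration can be computed. A minimal illustrative sketch of the push side; the index field and the struct members are assumptions modelled on the ftrace_retfunc fields used elsewhere in this patch, not code from the patch itself:

	/* Illustrative only: curr_ret_stack and the member names are assumed. */
	static int push_return_trace(unsigned long ret, unsigned long func,
				     unsigned long long time,
				     struct task_struct *t)
	{
		int index;

		if (!t->ret_stack)
			return -EBUSY;

		index = ++t->curr_ret_stack;		/* hypothetical depth index */
		if (index >= FTRACE_RETFUNC_DEPTH) {
			t->curr_ret_stack--;
			return -EBUSY;			/* counted as an overrun */
		}

		t->ret_stack[index].ret = ret;		/* real return address */
		t->ret_stack[index].func = func;	/* traced function */
		t->ret_stack[index].calltime = time;	/* entry timestamp */
		return 0;
	}

The matching pop on function return restores .ret and reads back .calltime; the difference between the return timestamp and .calltime is the duration that the tracer prints.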
@@ -217,6 +220,17 @@ config BRANCH_TRACER

Say N if unsure.

config POWER_TRACER
bool "Trace power consumption behavior"
depends on DEBUG_KERNEL
depends on X86
select TRACING
help
This tracer helps developers to analyze and optimize the kernels
power management decisions, specifically the C-state and P-state
behavior.


config STACK_TRACER
bool "Trace max stack"
depends on HAVE_FUNCTION_TRACER
@@ -29,8 +29,9 @@ obj-$(CONFIG_NOP_TRACER) += trace_nop.o
obj-$(CONFIG_STACK_TRACER) += trace_stack.o
obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
obj-$(CONFIG_FUNCTION_RET_TRACER) += trace_functions_return.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
obj-$(CONFIG_BTS_TRACER) += trace_bts.o
obj-$(CONFIG_POWER_TRACER) += trace_power.o

libftrace-y := ftrace.o
@@ -47,12 +47,12 @@
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* ftrace_pid_trace >= 0 will only trace threads with this pid */
static int ftrace_pid_trace = -1;

/* Quick disabling of function tracer. */
int function_trace_stop;

/* By default, current tracing type is normal tracing. */
enum ftrace_tracing_type_t ftrace_tracing_type = FTRACE_TYPE_ENTER;

/*
* ftrace_disabled is set when an anomaly is discovered.
* ftrace_disabled is much stronger than ftrace_enabled.

@@ -61,6 +61,7 @@ static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static DEFINE_MUTEX(ftrace_start_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{

@@ -70,6 +71,7 @@ static struct ftrace_ops ftrace_list_end __read_mostly =
static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{

@@ -86,6 +88,21 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
};
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
if (current->pid != ftrace_pid_trace)
return;

ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
/* do not set ftrace_pid_function to itself! */
if (func != ftrace_pid_func)
ftrace_pid_function = func;
}

/**
* clear_ftrace_function - reset the ftrace function
*

@@ -96,6 +113,7 @@ void clear_ftrace_function(void)
{
ftrace_trace_function = ftrace_stub;
__ftrace_trace_function = ftrace_stub;
ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
@@ -128,20 +146,26 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
ftrace_list = ops;

if (ftrace_enabled) {
ftrace_func_t func;

if (ops->next == &ftrace_list_end)
func = ops->func;
else
func = ftrace_list_func;

if (ftrace_pid_trace >= 0) {
set_ftrace_pid_function(func);
func = ftrace_pid_func;
}

/*
* For one func, simply call it directly.
* For more than one func, call the chain.
*/
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
if (ops->next == &ftrace_list_end)
ftrace_trace_function = ops->func;
else
ftrace_trace_function = ftrace_list_func;
ftrace_trace_function = func;
#else
if (ops->next == &ftrace_list_end)
__ftrace_trace_function = ops->func;
else
__ftrace_trace_function = ftrace_list_func;
__ftrace_trace_function = func;
ftrace_trace_function = ftrace_test_stop_func;
#endif
}

@@ -182,8 +206,19 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)

if (ftrace_enabled) {
/* If we only have one func left, then call that directly */
if (ftrace_list->next == &ftrace_list_end)
ftrace_trace_function = ftrace_list->func;
if (ftrace_list->next == &ftrace_list_end) {
ftrace_func_t func = ftrace_list->func;

if (ftrace_pid_trace >= 0) {
set_ftrace_pid_function(func);
func = ftrace_pid_func;
}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
ftrace_trace_function = func;
#else
__ftrace_trace_function = func;
#endif
}
}

out:

@@ -192,6 +227,38 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
return ret;
}

static void ftrace_update_pid_func(void)
{
ftrace_func_t func;

/* should not be called from interrupt context */
spin_lock(&ftrace_lock);

if (ftrace_trace_function == ftrace_stub)
goto out;

func = ftrace_trace_function;

if (ftrace_pid_trace >= 0) {
set_ftrace_pid_function(func);
func = ftrace_pid_func;
} else {
if (func != ftrace_pid_func)
goto out;

set_ftrace_pid_function(func);
}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
ftrace_trace_function = func;
#else
__ftrace_trace_function = func;
#endif

out:
spin_unlock(&ftrace_lock);
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD

@@ -211,6 +278,8 @@ enum {
FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
FTRACE_ENABLE_MCOUNT = (1 << 3),
FTRACE_DISABLE_MCOUNT = (1 << 4),
FTRACE_START_FUNC_RET = (1 << 5),
FTRACE_STOP_FUNC_RET = (1 << 6),
};

static int ftrace_filtered;

@@ -395,14 +464,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
unsigned long ip, fl;
unsigned long ftrace_addr;

#ifdef CONFIG_FUNCTION_RET_TRACER
if (ftrace_tracing_type == FTRACE_TYPE_ENTER)
ftrace_addr = (unsigned long)ftrace_caller;
else
ftrace_addr = (unsigned long)ftrace_return_caller;
#else
ftrace_addr = (unsigned long)ftrace_caller;
#endif

ip = rec->ip;

@@ -535,6 +597,11 @@ static int __ftrace_modify_code(void *data)
if (*command & FTRACE_UPDATE_TRACE_FUNC)
ftrace_update_ftrace_func(ftrace_trace_function);

if (*command & FTRACE_START_FUNC_RET)
ftrace_enable_ftrace_graph_caller();
else if (*command & FTRACE_STOP_FUNC_RET)
ftrace_disable_ftrace_graph_caller();

return 0;
}
@@ -545,12 +612,22 @@ static void ftrace_run_update_code(int command)

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;
static DEFINE_MUTEX(ftrace_start_lock);

static void ftrace_startup(void)
static void ftrace_startup_enable(int command)
{
int command = 0;
if (saved_ftrace_func != ftrace_trace_function) {
saved_ftrace_func = ftrace_trace_function;
command |= FTRACE_UPDATE_TRACE_FUNC;
}

if (!command || !ftrace_enabled)
return;

ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
if (unlikely(ftrace_disabled))
return;

@@ -558,23 +635,13 @@ static void ftrace_startup(void)
ftrace_start_up++;
command |= FTRACE_ENABLE_CALLS;

if (saved_ftrace_func != ftrace_trace_function) {
saved_ftrace_func = ftrace_trace_function;
command |= FTRACE_UPDATE_TRACE_FUNC;
}
ftrace_startup_enable(command);

if (!command || !ftrace_enabled)
goto out;

ftrace_run_update_code(command);
out:
mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(void)
static void ftrace_shutdown(int command)
{
int command = 0;

if (unlikely(ftrace_disabled))
return;

@@ -1262,13 +1329,10 @@ static struct file_operations ftrace_notrace_fops = {
.release = ftrace_notrace_release,
};

static __init int ftrace_init_debugfs(void)
static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
struct dentry *d_tracer;
struct dentry *entry;

d_tracer = tracing_init_dentry();

entry = debugfs_create_file("available_filter_functions", 0444,
d_tracer, NULL, &ftrace_avail_fops);
if (!entry)

@@ -1295,8 +1359,6 @@ static __init int ftrace_init_debugfs(void)
return 0;
}

fs_initcall(ftrace_init_debugfs);

static int ftrace_convert_nops(struct module *mod,
unsigned long *start,
unsigned long *end)

@@ -1382,12 +1444,101 @@ static int __init ftrace_nodyn_init(void)
}
device_initcall(ftrace_nodyn_init);

# define ftrace_startup() do { } while (0)
# define ftrace_shutdown() do { } while (0)
static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command) do { } while (0)
# define ftrace_shutdown(command) do { } while (0)
# define ftrace_startup_sysctl() do { } while (0)
# define ftrace_shutdown_sysctl() do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[64];
int r;

if (ftrace_pid_trace >= 0)
r = sprintf(buf, "%u\n", ftrace_pid_trace);
else
r = sprintf(buf, "no pid\n");

return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[64];
long val;
int ret;

if (cnt >= sizeof(buf))
return -EINVAL;

if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;

buf[cnt] = 0;

ret = strict_strtol(buf, 10, &val);
if (ret < 0)
return ret;

mutex_lock(&ftrace_start_lock);
if (ret < 0) {
/* disable pid tracing */
if (ftrace_pid_trace < 0)
goto out;
ftrace_pid_trace = -1;

} else {

if (ftrace_pid_trace == val)
goto out;

ftrace_pid_trace = val;
}

/* update the function call */
ftrace_update_pid_func();
ftrace_startup_enable(0);

out:
mutex_unlock(&ftrace_start_lock);

return cnt;
}

static struct file_operations ftrace_pid_fops = {
.read = ftrace_pid_read,
.write = ftrace_pid_write,
};

static __init int ftrace_init_debugfs(void)
{
struct dentry *d_tracer;
struct dentry *entry;

d_tracer = tracing_init_dentry();
if (!d_tracer)
return 0;

ftrace_init_dyn_debugfs(d_tracer);

entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
NULL, &ftrace_pid_fops);
if (!entry)
pr_warning("Could not create debugfs "
"'set_ftrace_pid' entry\n");
return 0;
}

fs_initcall(ftrace_init_debugfs);

/**
* ftrace_kill - kill ftrace
*
@@ -1422,15 +1573,9 @@ int register_ftrace_function(struct ftrace_ops *ops)

mutex_lock(&ftrace_sysctl_lock);

if (ftrace_tracing_type == FTRACE_TYPE_RETURN) {
ret = -EBUSY;
goto out;
}

ret = __register_ftrace_function(ops);
ftrace_startup();
ftrace_startup(0);

out:
mutex_unlock(&ftrace_sysctl_lock);
return ret;
}

@@ -1447,7 +1592,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)

mutex_lock(&ftrace_sysctl_lock);
ret = __unregister_ftrace_function(ops);
ftrace_shutdown();
ftrace_shutdown(0);
mutex_unlock(&ftrace_sysctl_lock);

return ret;

@@ -1496,14 +1641,15 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
return ret;
}

#ifdef CONFIG_FUNCTION_RET_TRACER
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_retfunc_active;

/* The callback that hooks the return of a function */
trace_function_return_t ftrace_function_return =
(trace_function_return_t)ftrace_stub;
static atomic_t ftrace_graph_active;

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry =
(trace_func_graph_ent_t)ftrace_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)

@@ -1549,7 +1695,7 @@ free:
}

/* Allocate a return stack for each task */
static int start_return_tracing(void)
static int start_graph_tracing(void)
{
struct ftrace_ret_stack **ret_stack_list;
int ret;

@@ -1569,52 +1715,46 @@ static int start_return_tracing(void)
return ret;
}

int register_ftrace_return(trace_function_return_t func)
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
trace_func_graph_ent_t entryfunc)
{
int ret = 0;

mutex_lock(&ftrace_sysctl_lock);

/*
* Don't launch return tracing if normal function
* tracing is already running.
*/
if (ftrace_trace_function != ftrace_stub) {
ret = -EBUSY;
goto out;
}
atomic_inc(&ftrace_retfunc_active);
ret = start_return_tracing();
atomic_inc(&ftrace_graph_active);
ret = start_graph_tracing();
if (ret) {
atomic_dec(&ftrace_retfunc_active);
atomic_dec(&ftrace_graph_active);
goto out;
}
ftrace_tracing_type = FTRACE_TYPE_RETURN;
ftrace_function_return = func;
ftrace_startup();

ftrace_graph_return = retfunc;
ftrace_graph_entry = entryfunc;

ftrace_startup(FTRACE_START_FUNC_RET);

out:
mutex_unlock(&ftrace_sysctl_lock);
return ret;
}

void unregister_ftrace_return(void)
void unregister_ftrace_graph(void)
{
mutex_lock(&ftrace_sysctl_lock);

atomic_dec(&ftrace_retfunc_active);
ftrace_function_return = (trace_function_return_t)ftrace_stub;
ftrace_shutdown();
/* Restore normal tracing type */
ftrace_tracing_type = FTRACE_TYPE_ENTER;
atomic_dec(&ftrace_graph_active);
ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
ftrace_graph_entry = (trace_func_graph_ent_t)ftrace_stub;
ftrace_shutdown(FTRACE_STOP_FUNC_RET);

mutex_unlock(&ftrace_sysctl_lock);
}

/* Allocate a return stack for newly created task */
void ftrace_retfunc_init_task(struct task_struct *t)
void ftrace_graph_init_task(struct task_struct *t)
{
if (atomic_read(&ftrace_retfunc_active)) {
if (atomic_read(&ftrace_graph_active)) {
t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
* sizeof(struct ftrace_ret_stack),
GFP_KERNEL);

@@ -1626,7 +1766,7 @@ void ftrace_retfunc_init_task(struct task_struct *t)
t->ret_stack = NULL;
}

void ftrace_retfunc_exit_task(struct task_struct *t)
void ftrace_graph_exit_task(struct task_struct *t)
{
struct ftrace_ret_stack *ret_stack = t->ret_stack;

@@ -1638,5 +1778,3 @@ void ftrace_retfunc_exit_task(struct task_struct *t)
}
#endif
@@ -804,7 +804,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
spin_unlock(&trace_cmdline_lock);
}

static char *trace_find_cmdline(int pid)
char *trace_find_cmdline(int pid)
{
char *cmdline = "<...>";
unsigned map;

@@ -878,15 +878,15 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
}

#ifdef CONFIG_FUNCTION_RET_TRACER
static void __trace_function_return(struct trace_array *tr,
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void __trace_graph_entry(struct trace_array *tr,
struct trace_array_cpu *data,
struct ftrace_retfunc *trace,
struct ftrace_graph_ent *trace,
unsigned long flags,
int pc)
{
struct ring_buffer_event *event;
struct ftrace_ret_entry *entry;
struct ftrace_graph_ent_entry *entry;
unsigned long irq_flags;

if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))

@@ -898,12 +898,32 @@ static void __trace_function_return(struct trace_array *tr,
return;
entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, flags, pc);
entry->ent.type = TRACE_FN_RET;
entry->ip = trace->func;
entry->parent_ip = trace->ret;
entry->rettime = trace->rettime;
entry->calltime = trace->calltime;
entry->overrun = trace->overrun;
entry->ent.type = TRACE_GRAPH_ENT;
entry->graph_ent = *trace;
ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
}

static void __trace_graph_return(struct trace_array *tr,
struct trace_array_cpu *data,
struct ftrace_graph_ret *trace,
unsigned long flags,
int pc)
{
struct ring_buffer_event *event;
struct ftrace_graph_ret_entry *entry;
unsigned long irq_flags;

if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
return;

event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
&irq_flags);
if (!event)
return;
entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, flags, pc);
entry->ent.type = TRACE_GRAPH_RET;
entry->ret = *trace;
ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
}
#endif

@@ -1177,8 +1197,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_RET_TRACER
void trace_function_return(struct ftrace_retfunc *trace)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
void trace_graph_entry(struct ftrace_graph_ent *trace)
{
struct trace_array *tr = &global_trace;
struct trace_array_cpu *data;

@@ -1193,12 +1213,33 @@ void trace_function_return(struct ftrace_retfunc *trace)
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
pc = preempt_count();
__trace_function_return(tr, data, trace, flags, pc);
__trace_graph_entry(tr, data, trace, flags, pc);
}
atomic_dec(&data->disabled);
raw_local_irq_restore(flags);
}
#endif /* CONFIG_FUNCTION_RET_TRACER */

void trace_graph_return(struct ftrace_graph_ret *trace)
{
struct trace_array *tr = &global_trace;
struct trace_array_cpu *data;
unsigned long flags;
long disabled;
int cpu;
int pc;

raw_local_irq_save(flags);
cpu = raw_smp_processor_id();
data = tr->data[cpu];
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
pc = preempt_count();
__trace_graph_return(tr, data, trace, flags, pc);
}
atomic_dec(&data->disabled);
raw_local_irq_restore(flags);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static struct ftrace_ops trace_ops __read_mostly =
{

@@ -2000,9 +2041,11 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
trace_seq_print_cont(s, iter);
break;
}
case TRACE_FN_RET: {
return print_return_function(iter);
break;
case TRACE_GRAPH_RET: {
return print_graph_function(iter);
}
case TRACE_GRAPH_ENT: {
return print_graph_function(iter);
}
case TRACE_BRANCH: {
struct trace_branch *field;
@@ -25,9 +25,11 @@ enum trace_type {
TRACE_BRANCH,
TRACE_BOOT_CALL,
TRACE_BOOT_RET,
TRACE_FN_RET,
TRACE_GRAPH_RET,
TRACE_GRAPH_ENT,
TRACE_USER_STACK,
TRACE_BTS,
TRACE_POWER,

__TRACE_LAST_TYPE
};

@@ -56,14 +58,16 @@ struct ftrace_entry {
unsigned long parent_ip;
};

/* Function call entry */
struct ftrace_graph_ent_entry {
struct trace_entry ent;
struct ftrace_graph_ent graph_ent;
};

/* Function return entry */
struct ftrace_ret_entry {
struct trace_entry ent;
unsigned long ip;
unsigned long parent_ip;
unsigned long long calltime;
unsigned long long rettime;
unsigned long overrun;
struct ftrace_graph_ret_entry {
struct trace_entry ent;
struct ftrace_graph_ret ret;
};
extern struct tracer boot_tracer;

@@ -160,6 +164,11 @@ struct bts_entry {
unsigned long to;
};

struct trace_power {
struct trace_entry ent;
struct power_trace state_data;
};

/*
* trace_flag_type is an enumeration that holds different
* states when a trace occurs. These are:

@@ -264,8 +273,12 @@ extern void __ftrace_bad_type(void);
IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
IF_ASSIGN(var, ent, struct ftrace_ret_entry, TRACE_FN_RET);\
IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \
TRACE_GRAPH_ENT); \
IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
TRACE_GRAPH_RET); \
IF_ASSIGN(var, ent, struct bts_entry, TRACE_BTS);\
IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \
__ftrace_bad_type(); \
} while (0)

@@ -397,9 +410,9 @@ void trace_function(struct trace_array *tr,
unsigned long ip,
unsigned long parent_ip,
unsigned long flags, int pc);
void
trace_function_return(struct ftrace_retfunc *trace);

void trace_graph_return(struct ftrace_graph_ret *trace);
void trace_graph_entry(struct ftrace_graph_ent *trace);
void trace_bts(struct trace_array *tr,
unsigned long from,
unsigned long to);

@@ -444,6 +457,7 @@ struct tracer_switch_ops {
struct tracer_switch_ops *next;
};

char *trace_find_cmdline(int pid);
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE

@@ -489,11 +503,11 @@ extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern unsigned long trace_flags;

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_RET_TRACER
extern enum print_line_t print_return_function(struct trace_iterator *iter);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern enum print_line_t print_graph_function(struct trace_iterator *iter);
#else
static inline enum print_line_t
print_return_function(struct trace_iterator *iter)
print_graph_function(struct trace_iterator *iter)
{
return TRACE_TYPE_UNHANDLED;
}
kernel/trace/trace_functions_graph.c (new file, 175 lines)

@@ -0,0 +1,175 @@
/*
*
* Function graph tracer.
* Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
* Mostly borrowed from function tracer which
* is Copyright (c) Steven Rostedt <srostedt@redhat.com>
*
*/
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

#define TRACE_GRAPH_INDENT 2

#define TRACE_GRAPH_PRINT_OVERRUN 0x1
static struct tracer_opt trace_opts[] = {
/* Display overruns or not */
{ TRACER_OPT(overrun, TRACE_GRAPH_PRINT_OVERRUN) },
{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
.val = 0, /* Don't display overruns by default */
.opts = trace_opts
};

/* pid on the last trace processed */
static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };

static int graph_trace_init(struct trace_array *tr)
{
int cpu, ret;

for_each_online_cpu(cpu)
tracing_reset(tr, cpu);

ret = register_ftrace_graph(&trace_graph_return,
&trace_graph_entry);
if (ret)
return ret;
tracing_start_cmdline_record();

return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
tracing_stop_cmdline_record();
unregister_ftrace_graph();
}

/* If the pid changed since the last trace, output this event */
static int verif_pid(struct trace_seq *s, pid_t pid, int cpu)
{
char *comm;

if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
return 1;

last_pid[cpu] = pid;
comm = trace_find_cmdline(pid);

return trace_seq_printf(s, "\nCPU[%03d]"
" ------------8<---------- thread %s-%d"
" ------------8<----------\n\n",
cpu, comm, pid);
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent *call, struct trace_seq *s,
struct trace_entry *ent, int cpu)
{
int i;
int ret;

if (!verif_pid(s, ent->pid, cpu))
return TRACE_TYPE_PARTIAL_LINE;

ret = trace_seq_printf(s, "CPU[%03d] ", cpu);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;

for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
ret = trace_seq_printf(s, " ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}

ret = seq_print_ip_sym(s, call->func, 0);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;

ret = trace_seq_printf(s, "() {\n");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
struct trace_entry *ent, int cpu)
{
int i;
int ret;

if (!verif_pid(s, ent->pid, cpu))
return TRACE_TYPE_PARTIAL_LINE;

ret = trace_seq_printf(s, "CPU[%03d] ", cpu);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;

for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
ret = trace_seq_printf(s, " ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}

ret = trace_seq_printf(s, "} ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;

ret = trace_seq_printf(s, "%llu\n", trace->rettime - trace->calltime);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;

if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
ret = trace_seq_printf(s, " (Overruns: %lu)\n",
trace->overrun);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
return TRACE_TYPE_HANDLED;
}

enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
struct trace_entry *entry = iter->ent;

switch (entry->type) {
case TRACE_GRAPH_ENT: {
struct ftrace_graph_ent_entry *field;
trace_assign_type(field, entry);
return print_graph_entry(&field->graph_ent, s, entry,
iter->cpu);
}
case TRACE_GRAPH_RET: {
struct ftrace_graph_ret_entry *field;
trace_assign_type(field, entry);
return print_graph_return(&field->ret, s, entry, iter->cpu);
}
default:
return TRACE_TYPE_UNHANDLED;
}
}

static struct tracer graph_trace __read_mostly = {
.name = "function-graph",
.init = graph_trace_init,
.reset = graph_trace_reset,
.print_line = print_graph_function,
.flags = &tracer_flags,
};

static __init int init_graph_trace(void)
{
return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);
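Note (not part of the original patch): from the format strings in verif_pid(), print_graph_entry() and print_graph_return() above, the tracer output has roughly this shape; the thread name, functions and durations here are invented for illustration only:

	CPU[000]  ------------8<---------- thread bash-2264 ------------8<----------

	CPU[000] sys_open() {
	CPU[000]   do_sys_open() {
	CPU[000]   } 1029
	CPU[000] } 2388

Each closing brace line prints trace->rettime - trace->calltime, and " (Overruns: N)" is appended when the overrun tracer option is enabled.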
Deleted file (98 lines):

@@ -1,98 +0,0 @@
/*
*
* Function return tracer.
* Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
* Mostly borrowed from function tracer which
* is Copyright (c) Steven Rostedt <srostedt@redhat.com>
*
*/
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"


#define TRACE_RETURN_PRINT_OVERRUN 0x1
static struct tracer_opt trace_opts[] = {
/* Display overruns or not */
{ TRACER_OPT(overrun, TRACE_RETURN_PRINT_OVERRUN) },
{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
.val = 0, /* Don't display overruns by default */
.opts = trace_opts
};


static int return_trace_init(struct trace_array *tr)
{
int cpu;
for_each_online_cpu(cpu)
tracing_reset(tr, cpu);

return register_ftrace_return(&trace_function_return);
}

static void return_trace_reset(struct trace_array *tr)
{
unregister_ftrace_return();
}


enum print_line_t
print_return_function(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
struct trace_entry *entry = iter->ent;
struct ftrace_ret_entry *field;
int ret;

if (entry->type == TRACE_FN_RET) {
trace_assign_type(field, entry);
ret = trace_seq_printf(s, "%pF -> ", (void *)field->parent_ip);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;

ret = seq_print_ip_sym(s, field->ip,
trace_flags & TRACE_ITER_SYM_MASK);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;

ret = trace_seq_printf(s, " (%llu ns)",
field->rettime - field->calltime);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;

if (tracer_flags.val & TRACE_RETURN_PRINT_OVERRUN) {
ret = trace_seq_printf(s, " (Overruns: %lu)",
field->overrun);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}

ret = trace_seq_printf(s, "\n");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;

return TRACE_TYPE_HANDLED;
}
return TRACE_TYPE_UNHANDLED;
}

static struct tracer return_trace __read_mostly = {
.name = "return",
.init = return_trace_init,
.reset = return_trace_reset,
.print_line = print_return_function,
.flags = &tracer_flags,
};

static __init int init_return_trace(void)
{
return register_tracer(&return_trace);
}

device_initcall(init_return_trace);
kernel/trace/trace_power.c (new file, 179 lines)

@@ -0,0 +1,179 @@
/*
* ring buffer based C-state tracer
*
* Arjan van de Ven <arjan@linux.intel.com>
* Copyright (C) 2008 Intel Corporation
*
* Much is borrowed from trace_boot.c which is
* Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
*
*/

#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/kallsyms.h>
#include <linux/module.h>

#include "trace.h"

static struct trace_array *power_trace;
static int __read_mostly trace_power_enabled;


static void start_power_trace(struct trace_array *tr)
{
trace_power_enabled = 1;
}

static void stop_power_trace(struct trace_array *tr)
{
trace_power_enabled = 0;
}


static int power_trace_init(struct trace_array *tr)
{
int cpu;
power_trace = tr;

trace_power_enabled = 1;

for_each_cpu_mask(cpu, cpu_possible_map)
tracing_reset(tr, cpu);
return 0;
}

static enum print_line_t power_print_line(struct trace_iterator *iter)
{
int ret = 0;
struct trace_entry *entry = iter->ent;
struct trace_power *field ;
struct power_trace *it;
struct trace_seq *s = &iter->seq;
struct timespec stamp;
struct timespec duration;

trace_assign_type(field, entry);
it = &field->state_data;
stamp = ktime_to_timespec(it->stamp);
duration = ktime_to_timespec(ktime_sub(it->end, it->stamp));

if (entry->type == TRACE_POWER) {
if (it->type == POWER_CSTATE)
ret = trace_seq_printf(s, "[%5ld.%09ld] CSTATE: Going to C%i on cpu %i for %ld.%09ld\n",
stamp.tv_sec,
stamp.tv_nsec,
it->state, iter->cpu,
duration.tv_sec,
duration.tv_nsec);
if (it->type == POWER_PSTATE)
ret = trace_seq_printf(s, "[%5ld.%09ld] PSTATE: Going to P%i on cpu %i\n",
stamp.tv_sec,
stamp.tv_nsec,
it->state, iter->cpu);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
return TRACE_TYPE_HANDLED;
}
return TRACE_TYPE_UNHANDLED;
}

static struct tracer power_tracer __read_mostly =
{
.name = "power",
.init = power_trace_init,
.start = start_power_trace,
.stop = stop_power_trace,
.reset = stop_power_trace,
.print_line = power_print_line,
};

static int init_power_trace(void)
{
return register_tracer(&power_tracer);
}
device_initcall(init_power_trace);

void trace_power_start(struct power_trace *it, unsigned int type,
unsigned int level)
{
if (!trace_power_enabled)
return;

memset(it, 0, sizeof(struct power_trace));
it->state = level;
it->type = type;
it->stamp = ktime_get();
}
EXPORT_SYMBOL_GPL(trace_power_start);


void trace_power_end(struct power_trace *it)
{
struct ring_buffer_event *event;
struct trace_power *entry;
struct trace_array_cpu *data;
unsigned long irq_flags;
struct trace_array *tr = power_trace;

if (!trace_power_enabled)
return;

preempt_disable();
it->end = ktime_get();
data = tr->data[smp_processor_id()];

event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
&irq_flags);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, 0, 0);
entry->ent.type = TRACE_POWER;
entry->state_data = *it;
ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

trace_wake_up();

out:
preempt_enable();
}
EXPORT_SYMBOL_GPL(trace_power_end);

void trace_power_mark(struct power_trace *it, unsigned int type,
unsigned int level)
{
struct ring_buffer_event *event;
struct trace_power *entry;
struct trace_array_cpu *data;
unsigned long irq_flags;
struct trace_array *tr = power_trace;

if (!trace_power_enabled)
return;

memset(it, 0, sizeof(struct power_trace));
it->state = level;
it->type = type;
it->stamp = ktime_get();
preempt_disable();
it->end = it->stamp;
data = tr->data[smp_processor_id()];

event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
&irq_flags);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, 0, 0);
entry->ent.type = TRACE_POWER;
entry->state_data = *it;
ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

trace_wake_up();

out:
preempt_enable();
}
EXPORT_SYMBOL_GPL(trace_power_mark);
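Note (not part of the original patch): the trace_power_start()/trace_power_end() helpers above are meant to be called from platform power management code. A minimal usage sketch; the surrounding function is hypothetical and the header that declares struct power_trace is assumed, only the helpers, struct power_trace and POWER_CSTATE come from this patch:

	#include <linux/ftrace.h>	/* assumed to declare struct power_trace */

	static void example_enter_cstate(int state)	/* hypothetical caller */
	{
		struct power_trace it;

		/* record the target C-state and the entry timestamp */
		trace_power_start(&it, POWER_CSTATE, state);

		/* ... platform idle entry would happen here ... */

		/* on wakeup, commit the event and its duration to the ring buffer */
		trace_power_end(&it);
	}

trace_power_mark() covers one-shot events such as P-state changes, where start and end coincide.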