Merge branch 'tip/perf/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/urgent

Ingo Molnar
2011-05-27 14:28:09 +02:00
parents b1d2dc3c06 b1cff0ad10
commit d6a72fe465
17 changed files with 148 additions and 41 deletions

kernel/jump_label.c

@@ -105,9 +105,12 @@ static int __jump_label_text_reserved(struct jump_entry *iter_start,
 }
 
 static void __jump_label_update(struct jump_label_key *key,
-				struct jump_entry *entry, int enable)
+				struct jump_entry *entry,
+				struct jump_entry *stop, int enable)
 {
-	for (; entry->key == (jump_label_t)(unsigned long)key; entry++) {
+	for (; (entry < stop) &&
+	      (entry->key == (jump_label_t)(unsigned long)key);
+	      entry++) {
 		/*
 		 * entry->code set to 0 invalidates module init text sections
 		 * kernel_text_address() verifies we are not in core kernel
@@ -181,7 +184,11 @@ static void __jump_label_mod_update(struct jump_label_key *key, int enable)
 	struct jump_label_mod *mod = key->next;
 
 	while (mod) {
-		__jump_label_update(key, mod->entries, enable);
+		struct module *m = mod->mod;
+
+		__jump_label_update(key, mod->entries,
+				    m->jump_entries + m->num_jump_entries,
+				    enable);
 		mod = mod->next;
 	}
 }
@@ -245,7 +252,8 @@ static int jump_label_add_module(struct module *mod)
 		key->next = jlm;
 
 		if (jump_label_enabled(key))
-			__jump_label_update(key, iter, JUMP_LABEL_ENABLE);
+			__jump_label_update(key, iter, iter_stop,
+					    JUMP_LABEL_ENABLE);
 	}
 
 	return 0;
@@ -371,7 +379,7 @@ static void jump_label_update(struct jump_label_key *key, int enable)
 	/* if there are no users, entry can be NULL */
 	if (entry)
-		__jump_label_update(key, entry, enable);
+		__jump_label_update(key, entry, __stop___jump_table, enable);
 
 #ifdef CONFIG_MODULES
 	__jump_label_mod_update(key, enable);
#endif
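
The point of the new stop argument: a key's entries are contiguous in a jump table, but each table ends somewhere, and for modules the old loop could walk past m->jump_entries + m->num_jump_entries if the key comparison kept matching whatever memory followed. Below is a minimal standalone sketch of the bounded-walk pattern; the struct is a simplified stand-in, not the kernel's real jump_entry layout.

	/* Sketch of the bounded table walk used above. */
	struct jump_entry {
		unsigned long key;
		unsigned long code;
	};

	static void update_range(unsigned long key,
				 struct jump_entry *entry,
				 struct jump_entry *stop)
	{
		/* The bound check must come first: reading entry->key
		 * at or beyond 'stop' would be an out-of-bounds access. */
		for (; entry < stop && entry->key == key; entry++) {
			/* ... patch the jump site at entry->code ... */
		}
	}

Because && short-circuits, entry->key is never read once entry reaches stop.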

kernel/trace/ftrace.c

@@ -109,12 +109,18 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
 static void ftrace_global_list_func(unsigned long ip,
 				    unsigned long parent_ip)
 {
-	struct ftrace_ops *op = rcu_dereference_raw(ftrace_global_list); /*see above*/
+	struct ftrace_ops *op;
+
+	if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
+		return;
 
+	trace_recursion_set(TRACE_GLOBAL_BIT);
+	op = rcu_dereference_raw(ftrace_global_list); /*see above*/
 	while (op != &ftrace_list_end) {
 		op->func(ip, parent_ip);
 		op = rcu_dereference_raw(op->next); /*see above*/
 	};
+	trace_recursion_clear(TRACE_GLOBAL_BIT);
 }
 
 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
@@ -1638,12 +1644,12 @@ static void ftrace_startup_enable(int command)
 	ftrace_run_update_code(command);
 }
 
-static void ftrace_startup(struct ftrace_ops *ops, int command)
+static int ftrace_startup(struct ftrace_ops *ops, int command)
 {
 	bool hash_enable = true;
 
 	if (unlikely(ftrace_disabled))
-		return;
+		return -ENODEV;
 
 	ftrace_start_up++;
 	command |= FTRACE_ENABLE_CALLS;
@@ -1662,6 +1668,8 @@ static void ftrace_startup(struct ftrace_ops *ops, int command)
 		ftrace_hash_rec_enable(ops, 1);
 
 	ftrace_startup_enable(command);
+
+	return 0;
 }
 
 static void ftrace_shutdown(struct ftrace_ops *ops, int command)
@@ -2501,7 +2509,7 @@ static void __enable_ftrace_function_probe(void)
 
 	ret = __register_ftrace_function(&trace_probe_ops);
 	if (!ret)
-		ftrace_startup(&trace_probe_ops, 0);
+		ret = ftrace_startup(&trace_probe_ops, 0);
 
 	ftrace_probe_registered = 1;
 }
@@ -3466,7 +3474,11 @@ device_initcall(ftrace_nodyn_init);
 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
 static inline void ftrace_startup_enable(int command) { }
 /* Keep as macros so we do not need to define the commands */
-# define ftrace_startup(ops, command)	do { } while (0)
+# define ftrace_startup(ops, command)			\
+	({						\
+		(ops)->flags |= FTRACE_OPS_FL_ENABLED;	\
+		0;					\
+	})
 # define ftrace_shutdown(ops, command)	do { } while (0)
 # define ftrace_startup_sysctl()	do { } while (0)
 # define ftrace_shutdown_sysctl()	do { } while (0)
@@ -3484,6 +3496,10 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
 {
 	struct ftrace_ops *op;
 
+	if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
+		return;
+
+	trace_recursion_set(TRACE_INTERNAL_BIT);
 	/*
 	 * Some of the ops may be dynamically allocated,
 	 * they must be freed after a synchronize_sched().
@@ -3496,6 +3512,7 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
 		op = rcu_dereference_raw(op->next);
 	};
 	preempt_enable_notrace();
+	trace_recursion_clear(TRACE_INTERNAL_BIT);
 }
 
 static void clear_ftrace_swapper(void)
@@ -3799,7 +3816,7 @@ int register_ftrace_function(struct ftrace_ops *ops)
 
 	ret = __register_ftrace_function(ops);
 	if (!ret)
-		ftrace_startup(ops, 0);
+		ret = ftrace_startup(ops, 0);
 
 out_unlock:
	mutex_unlock(&ftrace_lock);
@@ -4045,7 +4062,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 	ftrace_graph_return = retfunc;
 	ftrace_graph_entry = entryfunc;
 
-	ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
+	ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
 
 out:
 	mutex_unlock(&ftrace_lock);
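
Two details worth noting in this file: ftrace_startup() now reports failure (-ENODEV once ftrace_disabled is set), so every caller assigns its result, and the !CONFIG_DYNAMIC_FTRACE stub keeps that calling convention by becoming a GNU statement expression, where the last expression inside ({ ... }) is the value of the whole block. A freestanding sketch of that idiom follows; the names are illustrative only, and it compiles with gcc or clang.

	#include <stdio.h>

	#define MY_FL_ENABLED 0x1

	/* Statement expression: performs the side effect and yields 0,
	 * so callers can treat the stub like the real int-returning
	 * function. */
	#define startup_stub(flagsp, command)		\
		({					\
			(void)(command);		\
			*(flagsp) |= MY_FL_ENABLED;	\
			0;				\
		})

	int main(void)
	{
		unsigned int flags = 0;
		int ret = startup_stub(&flags, 0);

		printf("ret=%d flags=%#x\n", ret, flags); /* ret=0 flags=0x1 */
		return 0;
	}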

kernel/trace/ring_buffer.c

@@ -2216,7 +2216,7 @@ static noinline void trace_recursive_fail(void)
 
 	printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
 		    "HC[%lu]:SC[%lu]:NMI[%lu]\n",
-		    current->trace_recursion,
+		    trace_recursion_buffer(),
 		    hardirq_count() >> HARDIRQ_SHIFT,
 		    softirq_count() >> SOFTIRQ_SHIFT,
 		    in_nmi());
@@ -2226,9 +2226,9 @@ static noinline void trace_recursive_fail(void)
 
 static inline int trace_recursive_lock(void)
 {
-	current->trace_recursion++;
+	trace_recursion_inc();
 
-	if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
+	if (likely(trace_recursion_buffer() < TRACE_RECURSIVE_DEPTH))
 		return 0;
 
 	trace_recursive_fail();
@@ -2238,9 +2238,9 @@ static inline int trace_recursive_lock(void)
 
 static inline void trace_recursive_unlock(void)
 {
-	WARN_ON_ONCE(!current->trace_recursion);
+	WARN_ON_ONCE(!trace_recursion_buffer());
 
-	current->trace_recursion--;
+	trace_recursion_dec();
 }
 
 #else

kernel/trace/trace.h

@@ -784,4 +784,19 @@ extern const char *__stop___trace_bprintk_fmt[];
 	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
 #include "trace_entries.h"
 
+/* Only current can touch trace_recursion */
+#define trace_recursion_inc() do { (current)->trace_recursion++; } while (0)
+#define trace_recursion_dec() do { (current)->trace_recursion--; } while (0)
+
+/* Ring buffer has the 10 LSB bits to count */
+#define trace_recursion_buffer() ((current)->trace_recursion & 0x3ff)
+
+/* for function tracing recursion */
+#define TRACE_INTERNAL_BIT		(1<<11)
+#define TRACE_GLOBAL_BIT		(1<<12)
+
+#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (bit); } while (0)
+#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(bit); } while (0)
+#define trace_recursion_test(bit)	((current)->trace_recursion & (bit))
+
 #endif /* _LINUX_KERNEL_TRACE_H */
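
These macros share a single per-task word: the low 10 bits count ring-buffer nesting (trace_recursion_inc/dec/buffer), while bits 11 and 12 flag that the internal or global function-tracer callback is already running on this task, which is what the test/set/clear calls added to ftrace.c above consult. Here is a userspace-style sketch of that guard, with a plain variable standing in for current->trace_recursion.

	#include <stdio.h>

	#define TRACE_INTERNAL_BIT (1 << 11)

	/* Stand-in for current->trace_recursion; per-task in the kernel. */
	static unsigned long trace_recursion;

	static void do_traced_work(void);

	/* Guarded callback: a recursive entry on the same task sees the
	 * bit already set and returns immediately instead of looping. */
	static void list_func(void)
	{
		if (trace_recursion & TRACE_INTERNAL_BIT)
			return;
		trace_recursion |= TRACE_INTERNAL_BIT;
		do_traced_work();
		trace_recursion &= ~TRACE_INTERNAL_BIT;
	}

	static void do_traced_work(void)
	{
		list_func();	/* re-entry is cut off by the bit test */
		puts("work ran exactly once");
	}

	int main(void)
	{
		list_func();
		return 0;
	}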

kernel/trace/trace_events.c

@@ -1657,7 +1657,12 @@ static struct ftrace_ops trace_ops __initdata =
 
 static __init void event_trace_self_test_with_function(void)
 {
-	register_ftrace_function(&trace_ops);
+	int ret;
+	ret = register_ftrace_function(&trace_ops);
+	if (WARN_ON(ret < 0)) {
+		pr_info("Failed to enable function tracer for event tests\n");
+		return;
+	}
 	pr_info("Running tests again, along with the function tracer\n");
 	event_trace_self_tests();
 	unregister_ftrace_function(&trace_ops);

kernel/trace/trace_output.c

@@ -353,6 +353,33 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
 }
 EXPORT_SYMBOL(ftrace_print_symbols_seq);
 
+#if BITS_PER_LONG == 32
+const char *
+ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
+			 const struct trace_print_flags_u64 *symbol_array)
+{
+	int i;
+	const char *ret = p->buffer + p->len;
+
+	for (i = 0; symbol_array[i].name; i++) {
+
+		if (val != symbol_array[i].mask)
+			continue;
+
+		trace_seq_puts(p, symbol_array[i].name);
+		break;
+	}
+
+	if (!p->len)
+		trace_seq_printf(p, "0x%llx", val);
+
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+EXPORT_SYMBOL(ftrace_print_symbols_seq_u64);
+#endif
+
 const char *
 ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
 {
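
On 32-bit builds unsigned long is 32 bits, so passing a 64-bit trace-event field through the existing ftrace_print_symbols_seq() would truncate it; this u64 variant keeps the full value. The lookup itself is a linear scan of a name table terminated by a NULL name, falling back to hex output. A standalone sketch of that logic, without the trace_seq plumbing; struct sym_u64 is a simplified stand-in for trace_print_flags_u64:

	#include <stdio.h>

	struct sym_u64 {
		unsigned long long mask;
		const char *name;
	};

	/* Linear scan as in ftrace_print_symbols_seq_u64(): return the
	 * symbolic name, or NULL so the caller falls back to hex. */
	static const char *sym_u64_lookup(unsigned long long val,
					  const struct sym_u64 *table)
	{
		int i;

		for (i = 0; table[i].name; i++)
			if (val == table[i].mask)
				return table[i].name;
		return NULL;
	}

	int main(void)
	{
		static const struct sym_u64 tbl[] = {
			{ 1ULL << 40, "BIG_FLAG" },
			{ 0, NULL },		/* NULL name terminates */
		};
		unsigned long long v = 1ULL << 40;
		const char *name = sym_u64_lookup(v, tbl);

		if (name)
			printf("%s\n", name);
		else
			printf("0x%llx\n", v);
		return 0;
	}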