Merge tag 'trace-v4.15' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:

 - allow module init functions to be traced (see the example sketch after the changed-files summary below)

 - clean up events that are unused or not used by the current kernel config (saves space)

 - clean up of trace histogram code

 - add support for preempt and interrupt enable/disable events

 - various other cleanups

* tag 'trace-v4.15' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (30 commits)
  tracing, thermal: Hide cpu cooling trace events when not in use
  tracing, thermal: Hide devfreq trace events when not in use
  ftrace: Kill FTRACE_OPS_FL_PER_CPU
  perf/ftrace: Small cleanup
  perf/ftrace: Fix function trace events
  perf/ftrace: Revert ("perf/ftrace: Fix double traces of perf on ftrace:function")
  tracing, dma-buf: Remove unused trace event dma_fence_annotate_wait_on
  tracing, memcg, vmscan: Hide trace events when not in use
  tracing/xen: Hide events that are not used when X86_PAE is not defined
  tracing: mark trace_test_buffer as __maybe_unused
  printk: Remove superfluous memory barriers from printk_safe
  ftrace: Clear hashes of stale ips of init memory
  tracing: Add support for preempt and irq enable/disable events
  tracing: Prepare to add preempt and irq trace events
  ftrace/kallsyms: Have /proc/kallsyms show saved mod init functions
  ftrace: Add freeing algorithm to free ftrace_mod_maps
  ftrace: Save module init functions kallsyms symbols for tracing
  ftrace: Allow module init functions to be traced
  ftrace: Add a ftrace_free_mem() function for modules to use
  tracing: Reimplement log2
  ...
Linus Torvalds
2017-11-17 14:58:01 -08:00
32 changed files with 903 additions and 518 deletions
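
As an aside, here is a minimal, hypothetical module sketch of the headline item: with this series, a function placed in a module's .init.text, such as my_mod_init() below, can be caught by the function tracer while the module initializes, and its symbol stays resolvable after the init memory is freed. The module and all names in it are invented for illustration and are not part of this pull.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

/* Hypothetical example module, not part of this series. */
static int __init my_mod_init(void)	/* lives in .init.text */
{
	/*
	 * With this pull, the mcount/fentry call site in this init function
	 * is visible to the function tracer during module init, and the
	 * symbol is saved (via the new ftrace_mod_map bookkeeping) so
	 * kallsyms can still name it after .init.text is discarded.
	 */
	pr_info("example module loaded\n");
	return 0;
}

static void __exit my_mod_exit(void)
{
	pr_info("example module unloaded\n");
}

module_init(my_mod_init);
module_exit(my_mod_exit);
MODULE_LICENSE("GPL");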

kernel/trace/ftrace.c

@@ -203,30 +203,6 @@ void clear_ftrace_function(void)
ftrace_trace_function = ftrace_stub;
}
- static void per_cpu_ops_disable_all(struct ftrace_ops *ops)
- {
- int cpu;
- for_each_possible_cpu(cpu)
- *per_cpu_ptr(ops->disabled, cpu) = 1;
- }
- static int per_cpu_ops_alloc(struct ftrace_ops *ops)
- {
- int __percpu *disabled;
- if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
- return -EINVAL;
- disabled = alloc_percpu(int);
- if (!disabled)
- return -ENOMEM;
- ops->disabled = disabled;
- per_cpu_ops_disable_all(ops);
- return 0;
- }
static void ftrace_sync(struct work_struct *work)
{
/*
@@ -262,8 +238,8 @@ static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
* If this is a dynamic, RCU, or per CPU ops, or we force list func,
* then it needs to call the list anyway.
*/
- if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU |
- FTRACE_OPS_FL_RCU) || FTRACE_FORCE_LIST_FUNC)
+ if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
+ FTRACE_FORCE_LIST_FUNC)
return ftrace_ops_list_func;
return ftrace_ops_get_func(ops);
@@ -422,11 +398,6 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
if (!core_kernel_data((unsigned long)ops))
ops->flags |= FTRACE_OPS_FL_DYNAMIC;
- if (ops->flags & FTRACE_OPS_FL_PER_CPU) {
- if (per_cpu_ops_alloc(ops))
- return -ENOMEM;
- }
add_ftrace_ops(&ftrace_ops_list, ops);
/* Always save the function, and reset at unregistering */
@@ -2727,11 +2698,6 @@ void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
}
- static void per_cpu_ops_free(struct ftrace_ops *ops)
- {
- free_percpu(ops->disabled);
- }
static void ftrace_startup_enable(int command)
{
if (saved_ftrace_func != ftrace_trace_function) {
@@ -2833,7 +2799,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
* not currently active, we can just free them
* without synchronizing all CPUs.
*/
- if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU))
+ if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
goto free_ops;
return 0;
@@ -2880,7 +2846,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
* The same goes for freeing the per_cpu data of the per_cpu
* ops.
*/
- if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU)) {
+ if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
/*
* We need to do a hard force of sched synchronization.
* This is because we use preempt_disable() to do RCU, but
@@ -2903,9 +2869,6 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
free_ops:
arch_ftrace_trampoline_free(ops);
- if (ops->flags & FTRACE_OPS_FL_PER_CPU)
- per_cpu_ops_free(ops);
}
return 0;
@@ -5672,10 +5635,29 @@ static int ftrace_process_locs(struct module *mod,
return ret;
}
struct ftrace_mod_func {
struct list_head list;
char *name;
unsigned long ip;
unsigned int size;
};
struct ftrace_mod_map {
struct rcu_head rcu;
struct list_head list;
struct module *mod;
unsigned long start_addr;
unsigned long end_addr;
struct list_head funcs;
unsigned int num_funcs;
};
#ifdef CONFIG_MODULES
#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
static LIST_HEAD(ftrace_mod_maps);
static int referenced_filters(struct dyn_ftrace *rec)
{
struct ftrace_ops *ops;
@@ -5729,8 +5711,26 @@ static void clear_mod_from_hashes(struct ftrace_page *pg)
mutex_unlock(&trace_types_lock);
}
static void ftrace_free_mod_map(struct rcu_head *rcu)
{
struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
struct ftrace_mod_func *mod_func;
struct ftrace_mod_func *n;
/* All the contents of mod_map are now not visible to readers */
list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) {
kfree(mod_func->name);
list_del(&mod_func->list);
kfree(mod_func);
}
kfree(mod_map);
}
void ftrace_release_mod(struct module *mod)
{
struct ftrace_mod_map *mod_map;
struct ftrace_mod_map *n;
struct dyn_ftrace *rec;
struct ftrace_page **last_pg;
struct ftrace_page *tmp_page = NULL;
@@ -5742,6 +5742,14 @@ void ftrace_release_mod(struct module *mod)
if (ftrace_disabled)
goto out_unlock;
list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
if (mod_map->mod == mod) {
list_del_rcu(&mod_map->list);
call_rcu_sched(&mod_map->rcu, ftrace_free_mod_map);
break;
}
}
/*
* Each module has its own ftrace_pages, remove
* them from the list.
@@ -5749,7 +5757,8 @@ void ftrace_release_mod(struct module *mod)
last_pg = &ftrace_pages_start;
for (pg = ftrace_pages_start; pg; pg = *last_pg) {
rec = &pg->records[0];
- if (within_module_core(rec->ip, mod)) {
+ if (within_module_core(rec->ip, mod) ||
+ within_module_init(rec->ip, mod)) {
/*
* As core pages are first, the first
* page should never be a module page.
@@ -5818,7 +5827,8 @@ void ftrace_module_enable(struct module *mod)
* not part of this module, then skip this pg,
* which the "break" will do.
*/
- if (!within_module_core(rec->ip, mod))
+ if (!within_module_core(rec->ip, mod) &&
+ !within_module_init(rec->ip, mod))
break;
cnt = 0;
@@ -5863,23 +5873,245 @@ void ftrace_module_init(struct module *mod)
ftrace_process_locs(mod, mod->ftrace_callsites,
mod->ftrace_callsites + mod->num_ftrace_callsites);
}
static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
struct dyn_ftrace *rec)
{
struct ftrace_mod_func *mod_func;
unsigned long symsize;
unsigned long offset;
char str[KSYM_SYMBOL_LEN];
char *modname;
const char *ret;
ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
if (!ret)
return;
mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
if (!mod_func)
return;
mod_func->name = kstrdup(str, GFP_KERNEL);
if (!mod_func->name) {
kfree(mod_func);
return;
}
mod_func->ip = rec->ip - offset;
mod_func->size = symsize;
mod_map->num_funcs++;
list_add_rcu(&mod_func->list, &mod_map->funcs);
}
static struct ftrace_mod_map *
allocate_ftrace_mod_map(struct module *mod,
unsigned long start, unsigned long end)
{
struct ftrace_mod_map *mod_map;
mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
if (!mod_map)
return NULL;
mod_map->mod = mod;
mod_map->start_addr = start;
mod_map->end_addr = end;
mod_map->num_funcs = 0;
INIT_LIST_HEAD_RCU(&mod_map->funcs);
list_add_rcu(&mod_map->list, &ftrace_mod_maps);
return mod_map;
}
static const char *
ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
unsigned long addr, unsigned long *size,
unsigned long *off, char *sym)
{
struct ftrace_mod_func *found_func = NULL;
struct ftrace_mod_func *mod_func;
list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
if (addr >= mod_func->ip &&
addr < mod_func->ip + mod_func->size) {
found_func = mod_func;
break;
}
}
if (found_func) {
if (size)
*size = found_func->size;
if (off)
*off = addr - found_func->ip;
if (sym)
strlcpy(sym, found_func->name, KSYM_NAME_LEN);
return found_func->name;
}
return NULL;
}
const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym)
{
struct ftrace_mod_map *mod_map;
const char *ret = NULL;
/* mod_map is freed via call_rcu_sched() */
preempt_disable();
list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
if (ret) {
if (modname)
*modname = mod_map->mod->name;
break;
}
}
preempt_enable();
return ret;
}
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
char *type, char *name,
char *module_name, int *exported)
{
struct ftrace_mod_map *mod_map;
struct ftrace_mod_func *mod_func;
preempt_disable();
list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
if (symnum >= mod_map->num_funcs) {
symnum -= mod_map->num_funcs;
continue;
}
list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
if (symnum > 1) {
symnum--;
continue;
}
*value = mod_func->ip;
*type = 'T';
strlcpy(name, mod_func->name, KSYM_NAME_LEN);
strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
*exported = 1;
preempt_enable();
return 0;
}
WARN_ON(1);
break;
}
preempt_enable();
return -ERANGE;
}
#else
static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
struct dyn_ftrace *rec) { }
static inline struct ftrace_mod_map *
allocate_ftrace_mod_map(struct module *mod,
unsigned long start, unsigned long end)
{
return NULL;
}
#endif /* CONFIG_MODULES */
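/*
 * Illustrative sketch only (not part of this diff): example_resolve_addr()
 * is an invented helper showing how the kallsyms side is expected to use
 * ftrace_mod_address_lookup() above. When the normal module symbol lookup
 * fails, e.g. because the address sits in a module's already-freed init
 * text, the saved ftrace_mod_map entries can still provide a name.
 */
static const char *example_resolve_addr(unsigned long addr,
					unsigned long *size,
					unsigned long *off,
					char **modname, char *namebuf)
{
	const char *name;

	name = NULL;	/* pretend the regular module symbol lookup failed */
	if (!name)
		name = ftrace_mod_address_lookup(addr, size, off,
						 modname, namebuf);
	return name;
}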
- void __init ftrace_free_init_mem(void)
struct ftrace_init_func {
struct list_head list;
unsigned long ip;
};
/* Clear any init ips from hashes */
static void
clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
{
- unsigned long start = (unsigned long)(&__init_begin);
- unsigned long end = (unsigned long)(&__init_end);
struct ftrace_func_entry *entry;
if (ftrace_hash_empty(hash))
return;
entry = __ftrace_lookup_ip(hash, func->ip);
/*
* Do not allow this rec to match again.
* Yeah, it may waste some memory, but will be removed
* if/when the hash is modified again.
*/
if (entry)
entry->ip = 0;
}
static void
clear_func_from_hashes(struct ftrace_init_func *func)
{
struct trace_array *tr;
mutex_lock(&trace_types_lock);
list_for_each_entry(tr, &ftrace_trace_arrays, list) {
if (!tr->ops || !tr->ops->func_hash)
continue;
mutex_lock(&tr->ops->func_hash->regex_lock);
clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
mutex_unlock(&tr->ops->func_hash->regex_lock);
}
mutex_unlock(&trace_types_lock);
}
static void add_to_clear_hash_list(struct list_head *clear_list,
struct dyn_ftrace *rec)
{
struct ftrace_init_func *func;
func = kmalloc(sizeof(*func), GFP_KERNEL);
if (!func) {
WARN_ONCE(1, "alloc failure, ftrace filter could be stale\n");
return;
}
func->ip = rec->ip;
list_add(&func->list, clear_list);
}
void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
{
unsigned long start = (unsigned long)(start_ptr);
unsigned long end = (unsigned long)(end_ptr);
struct ftrace_page **last_pg = &ftrace_pages_start;
struct ftrace_page *pg;
struct dyn_ftrace *rec;
struct dyn_ftrace key;
struct ftrace_mod_map *mod_map = NULL;
struct ftrace_init_func *func, *func_next;
struct list_head clear_hash;
int order;
INIT_LIST_HEAD(&clear_hash);
key.ip = start;
key.flags = end; /* overload flags, as it is unsigned long */
mutex_lock(&ftrace_lock);
/*
* If we are freeing module init memory, then check if
* any tracer is active. If so, we need to save a mapping of
* the module functions being freed with the address.
*/
if (mod && ftrace_ops_list != &ftrace_list_end)
mod_map = allocate_ftrace_mod_map(mod, start, end);
for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
if (end < pg->records[0].ip ||
start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
@@ -5890,6 +6122,13 @@ void __init ftrace_free_init_mem(void)
ftrace_cmp_recs);
if (!rec)
continue;
/* rec will be cleared from hashes after ftrace_lock unlock */
add_to_clear_hash_list(&clear_hash, rec);
if (mod_map)
save_ftrace_mod_rec(mod_map, rec);
pg->index--;
ftrace_update_tot_cnt--;
if (!pg->index) {
@@ -5908,6 +6147,19 @@ void __init ftrace_free_init_mem(void)
goto again;
}
mutex_unlock(&ftrace_lock);
list_for_each_entry_safe(func, func_next, &clear_hash, list) {
clear_func_from_hashes(func);
kfree(func);
}
}
void __init ftrace_free_init_mem(void)
{
void *start = (void *)(&__init_begin);
void *end = (void *)(&__init_end);
ftrace_free_mem(NULL, start, end);
}
void __init ftrace_init(void)
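/*
 * Illustrative (hypothetical) caller, not part of this diff: the new
 * ftrace_free_mem() above is intended to be called by whatever code frees
 * a module's init sections, so that stale mcount records are removed and
 * the init symbols are stashed in a ftrace_mod_map first. The function
 * name below is invented; the init_layout fields follow the 4.15-era
 * struct module layout.
 */
static void example_free_module_init_mem(struct module *mod)
{
	void *init_start = mod->init_layout.base;
	void *init_end = mod->init_layout.base + mod->init_layout.size;

	/* Tell ftrace the range is going away before it is actually freed. */
	ftrace_free_mem(mod, init_start, init_end);

	/* ... the module loader would then free the init memory itself ... */
}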
@@ -6063,10 +6315,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
* If any of the above fails then the op->func() is not executed.
*/
if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
- (!(op->flags & FTRACE_OPS_FL_PER_CPU) ||
- !ftrace_function_local_disabled(op)) &&
ftrace_ops_test(op, ip, regs)) {
if (FTRACE_WARN_ON(!op->func)) {
pr_warn("op=%p %pS\n", op, op);
goto out;
@@ -6124,10 +6373,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
preempt_disable_notrace();
- if (!(op->flags & FTRACE_OPS_FL_PER_CPU) ||
- !ftrace_function_local_disabled(op)) {
- op->func(ip, parent_ip, op, regs);
- }
+ op->func(ip, parent_ip, op, regs);
preempt_enable_notrace();
trace_clear_recursion(bit);
@@ -6151,7 +6397,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
* or does per cpu logic, then we need to call the assist handler.
*/
if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) ||
- ops->flags & (FTRACE_OPS_FL_RCU | FTRACE_OPS_FL_PER_CPU))
+ ops->flags & FTRACE_OPS_FL_RCU)
return ftrace_ops_assist_func;
return ops->func;