Merge tag 'noinstr-lds-2020-05-19' into core/rcu

Get the noinstr section and annotation markers to base the RCU parts on.
Thomas Gleixner
2020-05-19 15:50:34 +02:00
886 changed files with 9240 additions and 4566 deletions

kernel/trace/Kconfig

@@ -467,7 +467,6 @@ config PROFILE_ANNOTATED_BRANCHES
config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals" if !FORTIFY_SOURCE
	select TRACE_BRANCH_PROFILING
-	imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives
	help
	  This tracer profiles all branch conditions. Every if ()
	  taken in the kernel is recorded whether it hit or miss.

kernel/trace/ftrace.c

@@ -5154,6 +5154,7 @@ int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
		list_del_rcu(&direct->next);
		synchronize_rcu_tasks();
		kfree(direct);
+		kfree(entry);
		ftrace_direct_func_count--;
	}
}
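This hunk plugs a leak: the descriptor was freed after the RCU-tasks grace period, but the hash entry unlinked alongside it never was. The idiom is that everything detached under RCU must be freed after the grace period, and all of it. A minimal sketch of the pattern, with hypothetical struct and function names (not the kernel's):

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct direct_call {			/* hypothetical stand-in */
	struct list_head next;
	unsigned long addr;
};

static void remove_direct_call(struct direct_call *d, void *hash_entry)
{
	list_del_rcu(&d->next);		/* unlink; readers may still hold it */
	synchronize_rcu_tasks();	/* wait out preemptible tracers */
	kfree(d);			/* safe now... */
	kfree(hash_entry);		/* ...and so is every sibling allocation */
}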

kernel/trace/preemptirq_delay_test.c

@@ -113,22 +113,42 @@ static int preemptirq_delay_run(void *data)
	for (i = 0; i < s; i++)
		(testfuncs[i])(i);

+	set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		schedule();
+		set_current_state(TASK_INTERRUPTIBLE);
+	}
+
+	__set_current_state(TASK_RUNNING);
+
	return 0;
}

-static struct task_struct *preemptirq_start_test(void)
+static int preemptirq_run_test(void)
{
+	struct task_struct *task;
	char task_name[50];

	snprintf(task_name, sizeof(task_name), "%s_test", test_mode);
-	return kthread_run(preemptirq_delay_run, NULL, task_name);
+	task = kthread_run(preemptirq_delay_run, NULL, task_name);
+	if (IS_ERR(task))
+		return PTR_ERR(task);
+
+	if (task)
+		kthread_stop(task);
+
+	return 0;
}

static ssize_t trigger_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
-	preemptirq_start_test();
+	ssize_t ret;
+
+	ret = preemptirq_run_test();
+	if (ret)
+		return ret;
+
	return count;
}
@@ -148,11 +168,9 @@ static struct kobject *preemptirq_delay_kobj;
static int __init preemptirq_delay_init(void)
{
-	struct task_struct *test_task;
	int retval;

-	test_task = preemptirq_start_test();
-	retval = PTR_ERR_OR_ZERO(test_task);
+	retval = preemptirq_run_test();
	if (retval != 0)
		return retval;
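The rewrite makes the test synchronous: the kthread now parks until it is reaped, and preemptirq_run_test() waits via kthread_stop(), so module unload can no longer race with a still-running test thread. A minimal sketch of that wait-for-stop idiom, with a hypothetical payload function:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static void run_payload_once(void)
{
	/* hypothetical test body */
}

static int worker_fn(void *data)
{
	run_payload_once();

	/* Park until the creator calls kthread_stop(). */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int run_test(void)
{
	struct task_struct *t = kthread_run(worker_fn, NULL, "demo_test");

	if (IS_ERR(t))
		return PTR_ERR(t);
	kthread_stop(t);	/* returns only after worker_fn() has exited */
	return 0;
}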

kernel/trace/trace.c

@@ -947,7 +947,8 @@ int __trace_bputs(unsigned long ip, const char *str)
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
-void tracing_snapshot_instance_cond(struct trace_array *tr, void *cond_data)
+static void tracing_snapshot_instance_cond(struct trace_array *tr,
+					   void *cond_data)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;
@@ -8525,6 +8526,19 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
	 */
	allocate_snapshot = false;
#endif

+	/*
+	 * Because of some magic with the way alloc_percpu() works on
+	 * x86_64, we need to synchronize the pgd of all the tables,
+	 * otherwise the trace events that happen in x86_64 page fault
+	 * handlers can't cope with the chance that alloc_percpu()'d
+	 * memory might be touched in the page fault trace event. We also
+	 * need to audit all other alloc_percpu() and vmalloc() calls in
+	 * tracing, because something might get triggered within a page
+	 * fault trace event!
+	 */
+	vmalloc_sync_mappings();
+
	return 0;
}
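The new comment is the whole story: percpu buffers allocated for a trace instance can be touched from inside the page-fault tracepoints, so the top-level page tables must be synchronized up front or the fault handler would fault recursively on x86_64. A minimal sketch of the ordering, assuming a pre-5.8 kernel where vmalloc_sync_mappings() still exists, with a hypothetical buffer:

#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/vmalloc.h>

static unsigned long __percpu *trace_scratch;	/* hypothetical buffer */

static int setup_trace_scratch(void)
{
	trace_scratch = alloc_percpu(unsigned long);
	if (!trace_scratch)
		return -ENOMEM;

	/*
	 * Sync kernel page tables before any page-fault trace event can
	 * dereference the fresh percpu mapping; otherwise the first access
	 * from inside a fault handler could itself fault.
	 */
	vmalloc_sync_mappings();
	return 0;
}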

kernel/trace/trace_boot.c

@@ -95,23 +95,19 @@ trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
	struct xbc_node *anode;
	char buf[MAX_BUF_LEN];
	const char *val;
-	int ret;
-
-	kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN);
-
-	ret = kprobe_event_gen_cmd_start(&cmd, event, NULL);
-	if (ret)
-		return ret;
+	int ret = 0;

	xbc_node_for_each_array_value(node, "probes", anode, val) {
-		ret = kprobe_event_add_field(&cmd, val);
-		if (ret)
-			return ret;
-	}
+		kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN);

-	ret = kprobe_event_gen_cmd_end(&cmd);
-	if (ret)
-		pr_err("Failed to add probe: %s\n", buf);
+		ret = kprobe_event_gen_cmd_start(&cmd, event, val);
+		if (ret)
+			break;
+
+		ret = kprobe_event_gen_cmd_end(&cmd);
+		if (ret)
+			pr_err("Failed to add probe: %s\n", buf);
+	}

	return ret;
}
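Before the fix, the command was initialized and started once outside the loop, with a NULL location, and add_field was misused to append the probe specs; each value in the "probes" array actually needs its own complete init/start/end cycle. A minimal sketch of that per-probe sequence with the same dynevent API (declarations assumed to come from <linux/trace_events.h>), using a hypothetical event name and location list:

#include <linux/trace_events.h>

#define CMD_BUF_LEN 256			/* stand-in for MAX_BUF_LEN */

static int add_probes(const char * const *locs, int nr_locs)
{
	struct dynevent_cmd cmd;
	char buf[CMD_BUF_LEN];
	int i, ret = 0;

	for (i = 0; i < nr_locs; i++) {
		/* One full command per probe location, as in the fix. */
		kprobe_event_cmd_init(&cmd, buf, CMD_BUF_LEN);

		ret = kprobe_event_gen_cmd_start(&cmd, "demo_event", locs[i]);
		if (ret)
			break;

		ret = kprobe_event_gen_cmd_end(&cmd);
		if (ret)
			break;
	}
	return ret;
}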

kernel/trace/trace_events_hist.c

@@ -3320,6 +3320,9 @@ static void __destroy_hist_field(struct hist_field *hist_field)
	kfree(hist_field->name);
	kfree(hist_field->type);

+	kfree(hist_field->system);
+	kfree(hist_field->event_name);
+
	kfree(hist_field);
}
@@ -4382,6 +4385,7 @@ static struct hist_field *create_var(struct hist_trigger_data *hist_data,
		goto out;
	}

+	var->ref = 1;
	var->flags = HIST_FIELD_FL_VAR;
	var->var.idx = idx;
	var->var.hist_data = var->hist_data = hist_data;
@@ -5011,6 +5015,9 @@ static void destroy_field_vars(struct hist_trigger_data *hist_data)
	for (i = 0; i < hist_data->n_field_vars; i++)
		destroy_field_var(hist_data->field_vars[i]);
+
+	for (i = 0; i < hist_data->n_save_vars; i++)
+		destroy_field_var(hist_data->save_vars[i]);
}

static void save_field_var(struct hist_trigger_data *hist_data,
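All three hunks enforce one discipline: every allocation or reference taken when a hist field or variable is created needs a mirror-image release in the destroy path, and a newly added container (save_vars) must be torn down exactly like its older sibling (field_vars). A minimal sketch of constructor/destructor symmetry, with hypothetical names:

#include <linux/slab.h>
#include <linux/string.h>

struct demo_field {			/* hypothetical stand-in */
	char *name;
	char *type;
	char *system;
	char *event_name;
};

static void destroy_demo_field(struct demo_field *f)
{
	if (!f)
		return;
	/* One kfree() per kstrdup() in the constructor -- no strays. */
	kfree(f->name);
	kfree(f->type);
	kfree(f->system);
	kfree(f->event_name);
	kfree(f);
}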

kernel/trace/trace_kprobe.c

@@ -453,7 +453,7 @@ static bool __within_notrace_func(unsigned long addr)
static bool within_notrace_func(struct trace_kprobe *tk)
{
-	unsigned long addr = addr = trace_kprobe_address(tk);
+	unsigned long addr = trace_kprobe_address(tk);
	char symname[KSYM_NAME_LEN], *p;

	if (!__within_notrace_func(addr))
@@ -940,6 +940,9 @@ EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
 * complete command or only the first part of it; in the latter case,
 * kprobe_event_add_fields() can be used to add more fields following this.
 *
+ * Unlike synth_event_gen_cmd_start(), @loc must be specified; this
+ * function returns -EINVAL if @loc == NULL.
+ *
 * Return: 0 if successful, error otherwise.
 */
int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
@@ -953,6 +956,9 @@ int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
	if (cmd->type != DYNEVENT_TYPE_KPROBE)
		return -EINVAL;

+	if (!loc)
+		return -EINVAL;
+
	if (kretprobe)
		snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name);
	else
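The added check backs the documented contract: a kprobe command is meaningless without a probe location, so a NULL @loc now fails with -EINVAL before any formatting happens. A minimal sketch of the validate-early pattern, around a hypothetical wrapper:

#include <linux/errno.h>
#include <linux/trace_events.h>

/* Hypothetical wrapper: reject incomplete specs before formatting. */
static int gen_kprobe_cmd(struct dynevent_cmd *cmd,
			  const char *name, const char *loc)
{
	if (!name || !loc)
		return -EINVAL;
	return kprobe_event_gen_cmd_start(cmd, name, loc);
}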

kernel/trace/tracing_map.c

@@ -283,7 +283,7 @@ int tracing_map_add_key_field(struct tracing_map *map,
	return idx;
}

-void tracing_map_array_clear(struct tracing_map_array *a)
+static void tracing_map_array_clear(struct tracing_map_array *a)
{
	unsigned int i;
@@ -294,7 +294,7 @@ void tracing_map_array_clear(struct tracing_map_array *a)
		memset(a->pages[i], 0, PAGE_SIZE);
}

-void tracing_map_array_free(struct tracing_map_array *a)
+static void tracing_map_array_free(struct tracing_map_array *a)
{
	unsigned int i;
@@ -316,7 +316,7 @@ void tracing_map_array_free(struct tracing_map_array *a)
	kfree(a);
}

-struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
+static struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
							  unsigned int entry_size)
{
	struct tracing_map_array *a;
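These three helpers have no callers outside tracing_map.c, so internal linkage is the right default: the symbols stay out of the kernel-wide namespace and the compiler warns when one of them loses its last user. A minimal sketch of the convention, with a hypothetical helper:

#include <linux/mm.h>

/*
 * File-local helper: 'static' limits visibility to this translation unit,
 * so -Wunused-function flags it automatically if it becomes dead code.
 */
static unsigned int demo_pages_needed(unsigned int n_elts,
				      unsigned int entry_size)
{
	return (n_elts * entry_size + PAGE_SIZE - 1) / PAGE_SIZE;
}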