Merge branch 'perf/urgent' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/events/core.c
@@ -10041,28 +10041,27 @@ SYSCALL_DEFINE5(perf_event_open,
 			goto err_context;
 
 		/*
-		 * Do not allow to attach to a group in a different
-		 * task or CPU context:
+		 * Make sure we're both events for the same CPU;
+		 * grouping events for different CPUs is broken; since
+		 * you can never concurrently schedule them anyhow.
 		 */
-		if (move_group) {
-			/*
-			 * Make sure we're both on the same task, or both
-			 * per-cpu events.
-			 */
-			if (group_leader->ctx->task != ctx->task)
-				goto err_context;
-
-			/*
-			 * Make sure we're both events for the same CPU;
-			 * grouping events for different CPUs is broken; since
-			 * you can never concurrently schedule them anyhow.
-			 */
-			if (group_leader->cpu != event->cpu)
-				goto err_context;
-		} else {
-			if (group_leader->ctx != ctx)
-				goto err_context;
-		}
+		if (group_leader->cpu != event->cpu)
+			goto err_context;
+
+		/*
+		 * Make sure we're both on the same task, or both
+		 * per-CPU events.
+		 */
+		if (group_leader->ctx->task != ctx->task)
+			goto err_context;
+
+		/*
+		 * Do not allow to attach to a group in a different task
+		 * or CPU context. If we're moving SW events, we'll fix
+		 * this up later, so allow that.
+		 */
+		if (!move_group && group_leader->ctx != ctx)
+			goto err_context;
 
 		/*
 		 * Only a group leader can be exclusive or pinned
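The net effect of the reordering: the CPU and task checks now run unconditionally, and only the context-identity check is waived when a software group leader is about to be moved into the hardware context. A minimal userspace sketch of the resulting predicate, using simplified stand-in types rather than the kernel's real perf structs:

#include <stdbool.h>
#include <stdio.h>

struct ctx { int task; };                 /* stand-in for perf_event_context */
struct evt { struct ctx *ctx; int cpu; }; /* stand-in for perf_event */

/* Returns true when 'event' may join 'leader's group in context 'ctx'. */
static bool group_ok(struct evt *leader, struct evt *event,
		     struct ctx *ctx, bool move_group)
{
	if (leader->cpu != event->cpu)        /* same CPU, always required */
		return false;
	if (leader->ctx->task != ctx->task)   /* same task, or both per-CPU */
		return false;
	/* A foreign context is tolerated only when the software group
	 * leader will be moved into 'ctx' later (move_group). */
	if (!move_group && leader->ctx != ctx)
		return false;
	return true;
}

int main(void)
{
	struct ctx hw = { .task = 1 }, sw = { .task = 1 };
	struct evt leader = { .ctx = &sw, .cpu = 0 };
	struct evt event  = { .ctx = &hw, .cpu = 0 };

	/* Cross-context attach is rejected without move_group (prints 0)
	 * and accepted with it (prints 1). */
	printf("plain:      %d\n", group_ok(&leader, &event, &hw, false));
	printf("move_group: %d\n", group_ok(&leader, &event, &hw, true));
	return 0;
}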
kernel/trace/ftrace.c
@@ -889,6 +889,10 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace)
 
 	function_profile_call(trace->func, 0, NULL, NULL);
 
+	/* If function graph is shutting down, ret_stack can be NULL */
+	if (!current->ret_stack)
+		return 0;
+
 	if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
 		current->ret_stack[index].subtime = 0;
 
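The added NULL check closes a race with function-graph shutdown, which tears down current->ret_stack while the profiler entry hook can still fire. The shape of the guard, as a standalone sketch with illustrative types (not the kernel's task_struct):

struct frame { unsigned long long subtime; };

struct task {
	struct frame *ret_stack;   /* cleared once tracing shuts down */
	int curr_index;
};

#define STACK_DEPTH 50

static void touch_frame(struct task *t)
{
	/* If the graph tracer is shutting down, ret_stack can be NULL;
	 * bail out instead of dereferencing a stale pointer. */
	if (!t->ret_stack)
		return;

	if (t->curr_index >= 0 && t->curr_index < STACK_DEPTH)
		t->ret_stack[t->curr_index].subtime = 0;
}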
kernel/trace/ring_buffer.c
@@ -4386,15 +4386,19 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
  * the page that was allocated, with the read page of the buffer.
  *
  * Returns:
- *  The page allocated, or NULL on error.
+ *  The page allocated, or ERR_PTR
  */
 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
 {
-	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+	struct ring_buffer_per_cpu *cpu_buffer;
 	struct buffer_data_page *bpage = NULL;
 	unsigned long flags;
 	struct page *page;
 
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+		return ERR_PTR(-ENODEV);
+
+	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
 	arch_spin_lock(&cpu_buffer->lock);
 
@@ -4412,7 +4416,7 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
 	page = alloc_pages_node(cpu_to_node(cpu),
 				GFP_KERNEL | __GFP_NORETRY, 0);
 	if (!page)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	bpage = page_address(page);
 
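These two hunks switch ring_buffer_alloc_read_page() from returning NULL to returning ERR_PTR() codes, so callers can tell an offline CPU (-ENODEV) from an allocation failure (-ENOMEM). A userspace re-creation of the <linux/err.h> trio and the new contract; alloc_read_page() here is a toy stand-in for the kernel function, and the macros are simplified copies of the real ones:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)    { return (void *)error; }
static inline long  PTR_ERR(const void *p) { return (long)p; }
static inline int   IS_ERR(const void *p)
{
	/* Error codes occupy the last page of the address space. */
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* Toy allocator mirroring the new contract: a valid pointer on
 * success, a distinct ERR_PTR() per failure cause. */
static void *alloc_read_page(int cpu, int cpu_online)
{
	void *page;

	if (!cpu_online)
		return ERR_PTR(-ENODEV);
	page = malloc(4096);
	if (!page)
		return ERR_PTR(-ENOMEM);
	return page;
}

int main(void)
{
	void *page = alloc_read_page(1, 0);

	if (IS_ERR(page)) {
		printf("failed: %ld\n", PTR_ERR(page)); /* -19 == -ENODEV */
		return 1;
	}
	free(page);
	return 0;
}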
@@ -4467,8 +4471,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
  *
  * for example:
  *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
- *	if (!rpage)
- *		return error;
+ *	if (IS_ERR(rpage))
+ *		return PTR_ERR(rpage);
  *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
  *	if (ret >= 0)
  *		process_page(rpage, ret);
kernel/trace/ring_buffer_benchmark.c
@@ -113,7 +113,7 @@ static enum event_status read_page(int cpu)
 	int i;
 
 	bpage = ring_buffer_alloc_read_page(buffer, cpu);
-	if (!bpage)
+	if (IS_ERR(bpage))
 		return EVENT_DROPPED;
 
 	ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
kernel/trace/trace.c
@@ -6598,7 +6598,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 {
 	struct ftrace_buffer_info *info = filp->private_data;
 	struct trace_iterator *iter = &info->iter;
-	ssize_t ret;
+	ssize_t ret = 0;
 	ssize_t size;
 
 	if (!count)
@@ -6612,10 +6612,15 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 	if (!info->spare) {
 		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
 							  iter->cpu_file);
-		info->spare_cpu = iter->cpu_file;
+		if (IS_ERR(info->spare)) {
+			ret = PTR_ERR(info->spare);
+			info->spare = NULL;
+		} else {
+			info->spare_cpu = iter->cpu_file;
+		}
 	}
 	if (!info->spare)
-		return -ENOMEM;
+		return ret;
 
 	/* Do we have previous read data to read? */
 	if (info->read < PAGE_SIZE)
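The hunk converts the ERR_PTR into a precise errno for the caller and restores the !info->spare invariant so the follow-up check still behaves. The same shape in standalone form, reusing IS_ERR()/PTR_ERR() and the toy alloc_read_page() from the sketch above; struct buf_info is an illustrative stand-in, not the kernel type:

struct buf_info {
	void *spare;     /* cached read page, NULL until allocated */
	int   spare_cpu;
};

static long ensure_spare(struct buf_info *info, int cpu, int cpu_online)
{
	long ret = 0;

	if (!info->spare) {
		info->spare = alloc_read_page(cpu, cpu_online);
		if (IS_ERR(info->spare)) {
			ret = PTR_ERR(info->spare); /* -ENODEV or -ENOMEM */
			info->spare = NULL;         /* restore the invariant */
		} else {
			info->spare_cpu = cpu;
		}
	}
	if (!info->spare)
		return ret;   /* the precise error, not a blanket -ENOMEM */

	return 0;             /* spare page is ready for the read path */
}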
@@ -6790,8 +6795,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		ref->ref = 1;
 		ref->buffer = iter->trace_buffer->buffer;
 		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
-		if (!ref->page) {
-			ret = -ENOMEM;
+		if (IS_ERR(ref->page)) {
+			ret = PTR_ERR(ref->page);
+			ref->page = NULL;
 			kfree(ref);
 			break;
 		}
@@ -8293,6 +8299,7 @@ __init static int tracer_alloc_buffers(void)
 	if (ret < 0)
 		goto out_free_cpumask;
 	/* Used for event triggers */
+	ret = -ENOMEM;
 	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
 	if (!temp_buffer)
 		goto out_rm_hp_state;
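Presetting ret matters because at this point ret still holds 0 from the preceding successful step, and the failure branch below jumps to a shared exit path that returns ret unchanged. A standalone illustration of the idiom, with hypothetical helpers standing in for the real ones:

#include <errno.h>
#include <stdlib.h>

static int early_step(void) { return 0; }   /* stand-in, succeeds */

static int setup(void)
{
	void *temp;
	int ret;

	ret = early_step();    /* ret == 0 after a successful step */
	if (ret < 0)
		goto out;

	ret = -ENOMEM;         /* preset: the branch below never sets ret */
	temp = malloc(4096);
	if (!temp)
		goto out;      /* without the preset this would return 0 */

	free(temp);
	ret = 0;
out:
	return ret;
}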
@@ -8407,4 +8414,4 @@ __init static int clear_boot_tracer(void)
 }
 
 fs_initcall(tracer_init_tracefs);
-late_initcall(clear_boot_tracer);
+late_initcall_sync(clear_boot_tracer);
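Within one initcall level, the plain calls run before the _sync variant, so moving clear_boot_tracer() to late_initcall_sync() guarantees it runs after every other late_initcall() that may still rely on the boot-time tracer. Schematically, as a kernel-code fragment (not compilable in userspace):

static int __init uses_boot_tracer(void)
{
	/* may still rely on boot-time tracer state here */
	return 0;
}
late_initcall(uses_boot_tracer);

static int __init clear_boot_tracer(void)
{
	/* safe: every plain late_initcall() has already run */
	return 0;
}
late_initcall_sync(clear_boot_tracer);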
kernel/trace/trace_events_filter.c
@@ -1959,6 +1959,10 @@ static int create_filter(struct trace_event_call *call,
 		if (err && set_str)
 			append_filter_err(ps, filter);
 	}
+	if (err && !set_str) {
+		free_event_filter(filter);
+		filter = NULL;
+	}
 	create_filter_finish(ps);
 
 	*filterp = filter;
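The added block keeps a partially built filter from being leaked, or handed back to the caller, when parsing fails and no error string was requested. The same free-and-NULL-on-error shape as a standalone sketch with illustrative types:

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

struct filter { char *prog; };

static void free_filter(struct filter *f)
{
	if (!f)
		return;
	free(f->prog);
	free(f);
}

/* parse_ok stands in for the real predicate-parsing result. */
static int create(struct filter **out, bool parse_ok, bool set_str)
{
	struct filter *f = calloc(1, sizeof(*f));
	int err;

	if (!f)
		return -ENOMEM;
	err = parse_ok ? 0 : -EINVAL;

	if (err && !set_str) {
		free_filter(f);   /* discard the half-built filter */
		f = NULL;         /* caller must see NULL, not garbage */
	}
	*out = f;                 /* NULL on failure unless set_str */
	return err;
}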
kernel/trace/tracing_map.c
@@ -221,16 +221,19 @@ void tracing_map_array_free(struct tracing_map_array *a)
 	if (!a)
 		return;
 
-	if (!a->pages) {
-		kfree(a);
-		return;
-	}
+	if (!a->pages)
+		goto free;
 
 	for (i = 0; i < a->n_pages; i++) {
 		if (!a->pages[i])
 			break;
 		free_page((unsigned long)a->pages[i]);
 	}
 
 	kfree(a->pages);
+
+ free:
 	kfree(a);
 }
 
 struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
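The goto-based unwind gives the function a single exit that always frees the outer object, so the early a->pages == NULL path can no longer skip releasing it. The idiom in standalone form, with illustrative types:

#include <stdlib.h>

struct array {
	void **pages;
	unsigned int n_pages;
};

static void array_free(struct array *a)
{
	unsigned int i;

	if (!a)
		return;

	if (!a->pages)
		goto free;           /* still frees 'a' below */

	for (i = 0; i < a->n_pages; i++)
		free(a->pages[i]);   /* free(NULL) is a no-op */
	free(a->pages);

 free:
	free(a);                     /* every path releases the object */
}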