cpumask: convert kernel trace functions
Impact: Reduce future memory usage, use new cpumask API.

(Eventually, cpumask_var_t will be allocated based on nr_cpu_ids, not
NR_CPUS.)

Convert kernel trace functions to use struct cpumask API:
1) Use cpumask_copy/cpumask_test_cpu/for_each_cpu.
2) Use cpumask_var_t and alloc_cpumask_var/free_cpumask_var everywhere.
3) Use on_each_cpu instead of playing with current->cpus_allowed.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
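The conversion pattern the patch applies throughout, as a minimal
standalone sketch (demo_mask and demo_init are illustrative names,
not symbols from this patch):

	#include <linux/kernel.h>
	#include <linux/cpumask.h>
	#include <linux/slab.h>

	static cpumask_var_t demo_mask;	/* was: static cpumask_t demo_mask; */

	static int demo_init(void)
	{
		int cpu;

		/* cpumask_var_t must be explicitly allocated, and the
		 * allocation can fail when CONFIG_CPUMASK_OFFSTACK=y: */
		if (!alloc_cpumask_var(&demo_mask, GFP_KERNEL))
			return -ENOMEM;

		/* was: demo_mask = cpu_possible_map; */
		cpumask_copy(demo_mask, cpu_possible_mask);

		for_each_cpu(cpu, demo_mask) {	/* was: for_each_cpu_mask() */
			/* was: cpu_isset(cpu, demo_mask) */
			if (cpumask_test_cpu(cpu, demo_mask))
				pr_debug("cpu %d set\n", cpu);
		}

		free_cpumask_var(demo_mask);	/* pairs with alloc_cpumask_var() */
		return 0;
	}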
@@ -89,10 +89,10 @@ static inline void ftrace_enable_cpu(void)
 	preempt_enable();
 }
 
-static cpumask_t __read_mostly tracing_buffer_mask;
+static cpumask_var_t __read_mostly tracing_buffer_mask;
 
 #define for_each_tracing_cpu(cpu)	\
-	for_each_cpu_mask(cpu, tracing_buffer_mask)
+	for_each_cpu(cpu, tracing_buffer_mask)
 
 /*
  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
@@ -2646,13 +2646,7 @@ static struct file_operations show_traces_fops = {
 /*
  * Only trace on a CPU if the bitmask is set:
  */
-static cpumask_t tracing_cpumask = CPU_MASK_ALL;
-
-/*
- * When tracing/tracing_cpu_mask is modified then this holds
- * the new bitmask we are about to install:
- */
-static cpumask_t tracing_cpumask_new;
+static cpumask_var_t tracing_cpumask;
 
 /*
  * The tracer itself will not take this lock, but still we want
@@ -2674,7 +2668,7 @@ tracing_cpumask_read(struct file *filp, char __user *ubuf,
 
 	mutex_lock(&tracing_cpumask_update_lock);
 
-	len = cpumask_scnprintf(mask_str, count, &tracing_cpumask);
+	len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
 	if (count - len < 2) {
 		count = -EINVAL;
 		goto out_err;
@@ -2693,9 +2687,13 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 		      size_t count, loff_t *ppos)
 {
 	int err, cpu;
+	cpumask_var_t tracing_cpumask_new;
+
+	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
+		return -ENOMEM;
 
 	mutex_lock(&tracing_cpumask_update_lock);
-	err = cpumask_parse_user(ubuf, count, &tracing_cpumask_new);
+	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
 	if (err)
 		goto err_unlock;
 
@@ -2706,26 +2704,28 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 		 * Increase/decrease the disabled counter if we are
 		 * about to flip a bit in the cpumask:
 		 */
-		if (cpu_isset(cpu, tracing_cpumask) &&
-				!cpu_isset(cpu, tracing_cpumask_new)) {
+		if (cpumask_test_cpu(cpu, tracing_cpumask) &&
+				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 			atomic_inc(&global_trace.data[cpu]->disabled);
 		}
-		if (!cpu_isset(cpu, tracing_cpumask) &&
-				cpu_isset(cpu, tracing_cpumask_new)) {
+		if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
+				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 			atomic_dec(&global_trace.data[cpu]->disabled);
 		}
 	}
 	__raw_spin_unlock(&ftrace_max_lock);
 	local_irq_enable();
 
-	tracing_cpumask = tracing_cpumask_new;
+	cpumask_copy(tracing_cpumask, tracing_cpumask_new);
 
 	mutex_unlock(&tracing_cpumask_update_lock);
+	free_cpumask_var(tracing_cpumask_new);
 
 	return count;
 
 err_unlock:
 	mutex_unlock(&tracing_cpumask_update_lock);
+	free_cpumask_var(tracing_cpumask);
 
 	return err;
 }
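The resulting lifecycle in the write path, reduced to its mask handling
as a hedged sketch (locking and the per-cpu disabled-counter updates
are elided; apply_user_mask is a stand-in name, not from the patch):

	static ssize_t apply_user_mask(const char __user *ubuf, size_t count)
	{
		cpumask_var_t new_mask;
		int err;

		/* the temporary mask lives only for the duration of the write */
		if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
			return -ENOMEM;

		err = cpumask_parse_user(ubuf, count, new_mask);
		if (err) {
			free_cpumask_var(new_mask);
			return err;
		}

		/* was: tracing_cpumask = new_mask; */
		cpumask_copy(tracing_cpumask, new_mask);
		free_cpumask_var(new_mask);
		return count;
	}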
@@ -3752,7 +3752,6 @@ void ftrace_dump(void)
 	static DEFINE_SPINLOCK(ftrace_dump_lock);
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
-	static cpumask_t mask;
 	static int dump_ran;
 	unsigned long flags;
 	int cnt = 0, cpu;
@@ -3786,8 +3785,6 @@ void ftrace_dump(void)
 	 * and then release the locks again.
 	 */
 
-	cpus_clear(mask);
-
 	while (!trace_empty(&iter)) {
 
 		if (!cnt)
@@ -3823,19 +3820,28 @@ __init static int tracer_alloc_buffers(void)
 {
 	struct trace_array_cpu *data;
 	int i;
+	int ret = -ENOMEM;
+
+	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
+		goto out;
+
+	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
+		goto out_free_buffer_mask;
 
-	/* TODO: make the number of buffers hot pluggable with CPUS */
-	tracing_buffer_mask = cpu_possible_map;
+	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
+	cpumask_copy(tracing_cpumask, cpu_all_mask);
 
+	/* TODO: make the number of buffers hot pluggable with CPUS */
 	global_trace.buffer = ring_buffer_alloc(trace_buf_size,
 						   TRACE_BUFFER_FLAGS);
 	if (!global_trace.buffer) {
 		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
 		WARN_ON(1);
-		return 0;
+		goto out_free_cpumask;
 	}
 	global_trace.entries = ring_buffer_size(global_trace.buffer);
+
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 	max_tr.buffer = ring_buffer_alloc(trace_buf_size,
 					     TRACE_BUFFER_FLAGS);
@@ -3843,7 +3849,7 @@ __init static int tracer_alloc_buffers(void)
 		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
 		WARN_ON(1);
 		ring_buffer_free(global_trace.buffer);
-		return 0;
+		goto out_free_cpumask;
 	}
 	max_tr.entries = ring_buffer_size(max_tr.buffer);
 	WARN_ON(max_tr.entries != global_trace.entries);
@@ -3873,8 +3879,14 @@ __init static int tracer_alloc_buffers(void)
 				       &trace_panic_notifier);
 
 	register_die_notifier(&trace_die_notifier);
+	ret = 0;
 
-	return 0;
+out_free_cpumask:
+	free_cpumask_var(tracing_cpumask);
+out_free_buffer_mask:
+	free_cpumask_var(tracing_buffer_mask);
+out:
+	return ret;
 }
 early_initcall(tracer_alloc_buffers);
 fs_initcall(tracer_init_debugfs);
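Point 3 of the changelog (on_each_cpu instead of playing with
current->cpus_allowed) lands in hunks not shown on this page; the
idiom, as a hedged sketch with illustrative function names:

	#include <linux/smp.h>

	static void demo_per_cpu(void *info)
	{
		/* runs on every online CPU, with interrupts disabled */
	}

	static void demo_run_everywhere(void)
	{
		/* was: save current->cpus_allowed, migrate to each CPU in
		 * turn with set_cpus_allowed(), then restore the saved mask */
		on_each_cpu(demo_per_cpu, NULL, 1);	/* wait == 1: block until done */
	}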