tracing: Rename trace_buffer to array_buffer
As part of the effort to remove the generic "ring_buffer" name that is currently used by both tracing and perf, tracing's ring buffer will be renamed to trace_buffer and perf's ring buffer will be renamed to perf_buffer. Because a field named trace_buffer already exists in struct trace_array, that field must first be renamed to array_buffer to free up the name. Link: https://lore.kernel.org/r/20191213153553.GE20583@krava Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
This commit is contained in:
@@ -82,7 +82,7 @@ func_prolog_preempt_disable(struct trace_array *tr,
|
||||
if (cpu != wakeup_current_cpu)
|
||||
goto out_enable;
|
||||
|
||||
*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
|
||||
*data = per_cpu_ptr(tr->array_buffer.data, cpu);
|
||||
disabled = atomic_inc_return(&(*data)->disabled);
|
||||
if (unlikely(disabled != 1))
|
||||
goto out;
|
||||
@@ -378,7 +378,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
|
||||
unsigned long flags, int pc)
|
||||
{
|
||||
struct trace_event_call *call = &event_context_switch;
|
||||
struct ring_buffer *buffer = tr->trace_buffer.buffer;
|
||||
struct ring_buffer *buffer = tr->array_buffer.buffer;
|
||||
struct ring_buffer_event *event;
|
||||
struct ctx_switch_entry *entry;
|
||||
|
||||
@@ -408,7 +408,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
|
||||
struct trace_event_call *call = &event_wakeup;
|
||||
struct ring_buffer_event *event;
|
||||
struct ctx_switch_entry *entry;
|
||||
struct ring_buffer *buffer = tr->trace_buffer.buffer;
|
||||
struct ring_buffer *buffer = tr->array_buffer.buffer;
|
||||
|
||||
event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
|
||||
sizeof(*entry), flags, pc);
|
||||
@@ -459,7 +459,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
|
||||
|
||||
/* disable local data, not wakeup_cpu data */
|
||||
cpu = raw_smp_processor_id();
|
||||
disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
|
||||
disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
|
||||
if (likely(disabled != 1))
|
||||
goto out;
|
||||
|
||||
@@ -471,7 +471,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
|
||||
goto out_unlock;
|
||||
|
||||
/* The task we are waiting for is waking up */
|
||||
data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
|
||||
data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
|
||||
|
||||
__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
|
||||
tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
|
||||
@@ -494,7 +494,7 @@ out_unlock:
|
||||
arch_spin_unlock(&wakeup_lock);
|
||||
local_irq_restore(flags);
|
||||
out:
|
||||
atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
|
||||
atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
|
||||
}
|
||||
|
||||
static void __wakeup_reset(struct trace_array *tr)
|
||||
@@ -513,7 +513,7 @@ static void wakeup_reset(struct trace_array *tr)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
tracing_reset_online_cpus(&tr->trace_buffer);
|
||||
tracing_reset_online_cpus(&tr->array_buffer);
|
||||
|
||||
local_irq_save(flags);
|
||||
arch_spin_lock(&wakeup_lock);
|
||||
@@ -551,7 +551,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
|
||||
return;
|
||||
|
||||
pc = preempt_count();
|
||||
disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
|
||||
disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
|
||||
if (unlikely(disabled != 1))
|
||||
goto out;
|
||||
|
||||
@@ -583,7 +583,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
|
||||
|
||||
local_save_flags(flags);
|
||||
|
||||
data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
|
||||
data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
|
||||
data->preempt_timestamp = ftrace_now(cpu);
|
||||
tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
|
||||
__trace_stack(wakeup_trace, flags, 0, pc);
|
||||
@@ -598,7 +598,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
|
||||
out_locked:
|
||||
arch_spin_unlock(&wakeup_lock);
|
||||
out:
|
||||
atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
|
||||
atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
|
||||
}
|
||||
|
||||
static void start_wakeup_tracer(struct trace_array *tr)
|
||||
|
Reference in New Issue
Block a user