tracing: Disable snapshot buffer when stopping instance tracers
commit b538bf7d0ec11ca49f536dfda742a5f6db90a798 upstream.
It used to be that only the top-level instance had a snapshot buffer (for
latency tracers like wakeup and irqsoff), so stopping a tracer in an
instance would not disable the snapshot buffer. This could have
unintended consequences if the irqsoff tracer is enabled.
Consolidate tracing_start/stop() with tracing_start/stop_tr() so that
all instances behave the same. The tracing_start/stop() functions now
just call their respective tracing_start/stop_tr() with global_trace
passed in.
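The resulting wrappers are trivial; as a quick illustration (a sketch
extracted from the patch below, with the per-instance helper bodies
elided), the global paths reduce to:

	void tracing_start(void)
	{
		return tracing_start_tr(&global_trace);
	}

	void tracing_stop(void)
	{
		return tracing_stop_tr(&global_trace);
	}

Since tracing_stop_tr() now also disables the snapshot (max) buffer of
the trace_array it is given, instance tracers stop recording into it as
well.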
Link: https://lkml.kernel.org/r/20231205220011.041220035@goodmis.org
Cc: stable@vger.kernel.org
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Fixes: 6d9b3fa5e7 ("tracing: Move tracing_max_latency into trace_array")
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2287,49 +2287,6 @@ int is_tracing_stopped(void)
 	return global_trace.stop_count;
 }
 
-/**
- * tracing_start - quick start of the tracer
- *
- * If tracing is enabled but was stopped by tracing_stop,
- * this will start the tracer back up.
- */
-void tracing_start(void)
-{
-	struct trace_buffer *buffer;
-	unsigned long flags;
-
-	if (tracing_disabled)
-		return;
-
-	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
-	if (--global_trace.stop_count) {
-		if (global_trace.stop_count < 0) {
-			/* Someone screwed up their debugging */
-			WARN_ON_ONCE(1);
-			global_trace.stop_count = 0;
-		}
-		goto out;
-	}
-
-	/* Prevent the buffers from switching */
-	arch_spin_lock(&global_trace.max_lock);
-
-	buffer = global_trace.array_buffer.buffer;
-	if (buffer)
-		ring_buffer_record_enable(buffer);
-
-#ifdef CONFIG_TRACER_MAX_TRACE
-	buffer = global_trace.max_buffer.buffer;
-	if (buffer)
-		ring_buffer_record_enable(buffer);
-#endif
-
-	arch_spin_unlock(&global_trace.max_lock);
-
- out:
-	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
-}
-
 static void tracing_start_tr(struct trace_array *tr)
 {
 	struct trace_buffer *buffer;
@@ -2338,25 +2295,70 @@ static void tracing_start_tr(struct trace_array *tr)
 	if (tracing_disabled)
 		return;
 
-	/* If global, we need to also start the max tracer */
-	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
-		return tracing_start();
-
 	raw_spin_lock_irqsave(&tr->start_lock, flags);
-
 	if (--tr->stop_count) {
-		if (tr->stop_count < 0) {
+		if (WARN_ON_ONCE(tr->stop_count < 0)) {
 			/* Someone screwed up their debugging */
-			WARN_ON_ONCE(1);
 			tr->stop_count = 0;
 		}
 		goto out;
 	}
 
+	/* Prevent the buffers from switching */
+	arch_spin_lock(&tr->max_lock);
+
 	buffer = tr->array_buffer.buffer;
 	if (buffer)
 		ring_buffer_record_enable(buffer);
 
+#ifdef CONFIG_TRACER_MAX_TRACE
+	buffer = tr->max_buffer.buffer;
+	if (buffer)
+		ring_buffer_record_enable(buffer);
+#endif
+
+	arch_spin_unlock(&tr->max_lock);
+
+ out:
+	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
+}
+
+/**
+ * tracing_start - quick start of the tracer
+ *
+ * If tracing is enabled but was stopped by tracing_stop,
+ * this will start the tracer back up.
+ */
+void tracing_start(void)
+
+{
+	return tracing_start_tr(&global_trace);
+}
+
+static void tracing_stop_tr(struct trace_array *tr)
+{
+	struct trace_buffer *buffer;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&tr->start_lock, flags);
+	if (tr->stop_count++)
+		goto out;
+
+	/* Prevent the buffers from switching */
+	arch_spin_lock(&tr->max_lock);
+
+	buffer = tr->array_buffer.buffer;
+	if (buffer)
+		ring_buffer_record_disable(buffer);
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+	buffer = tr->max_buffer.buffer;
+	if (buffer)
+		ring_buffer_record_disable(buffer);
+#endif
+
+	arch_spin_unlock(&tr->max_lock);
+
  out:
 	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
 }
@@ -2369,51 +2371,7 @@ static void tracing_start_tr(struct trace_array *tr)
  */
 void tracing_stop(void)
 {
-	struct trace_buffer *buffer;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
-	if (global_trace.stop_count++)
-		goto out;
-
-	/* Prevent the buffers from switching */
-	arch_spin_lock(&global_trace.max_lock);
-
-	buffer = global_trace.array_buffer.buffer;
-	if (buffer)
-		ring_buffer_record_disable(buffer);
-
-#ifdef CONFIG_TRACER_MAX_TRACE
-	buffer = global_trace.max_buffer.buffer;
-	if (buffer)
-		ring_buffer_record_disable(buffer);
-#endif
-
-	arch_spin_unlock(&global_trace.max_lock);
-
- out:
-	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
-}
-
-static void tracing_stop_tr(struct trace_array *tr)
-{
-	struct trace_buffer *buffer;
-	unsigned long flags;
-
-	/* If global, we need to also stop the max tracer */
-	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
-		return tracing_stop();
-
-	raw_spin_lock_irqsave(&tr->start_lock, flags);
-	if (tr->stop_count++)
-		goto out;
-
-	buffer = tr->array_buffer.buffer;
-	if (buffer)
-		ring_buffer_record_disable(buffer);
-
- out:
-	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
+	return tracing_stop_tr(&global_trace);
 }
 
 static int trace_save_cmdline(struct task_struct *tsk)