Merge tag 'trace-v4.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
 "This release has a few updates:

  - STM can hook into the function tracer

  - Function filtering now supports more advanced glob matching

  - Ftrace selftests updates and added tests

  - Softirq tag in traces now shows only softirqs

  - ARM nop added to non-traced locations at compile time

  - New trace_marker_raw file that allows for binary input

  - Optimizations to the ring buffer

  - Removal of kmap in trace_marker

  - Wakeup and irqsoff tracers now adhere to the set_graph_notrace file

  - Other various fixes and cleanups"

* tag 'trace-v4.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (42 commits)
  selftests: ftrace: Shift down default message verbosity
  kprobes/trace: Fix kprobe selftest for newer gcc
  tracing/kprobes: Add a helper method to return number of probe hits
  tracing/rb: Init the CPU mask on allocation
  tracing: Use SOFTIRQ_OFFSET for softirq detection for more accurate results
  tracing/fgraph: Have wakeup and irqsoff tracers ignore graph functions too
  fgraph: Handle a case where a tracer ignores set_graph_notrace
  tracing: Replace kmap with copy_from_user() in trace_marker writing
  ftrace/x86_32: Set ftrace_stub to weak to prevent gcc from using short jumps to it
  tracing: Allow benchmark to be enabled at early_initcall()
  tracing: Have system enable return error if one of the events fail
  tracing: Do not start benchmark on boot up
  tracing: Have the reg function allow to fail
  ring-buffer: Force rb_end_commit() and rb_set_commit_to_write() inline
  ring-buffer: Force rb_update_write_stamp() to be inlined
  ring-buffer: Force inline of hotpath helper functions
  tracing: Make __buffer_unlock_commit() always_inline
  tracing: Make tracepoint_printk a static_key
  ring-buffer: Always inline rb_event_data()
  ring-buffer: Make rb_reserve_next_event() always inlined
  ...
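Of the items above, the new trace_marker_raw file takes binary writes from user space, avoiding text formatting in hot paths. A minimal user-space sketch, assuming tracefs is mounted at /sys/kernel/tracing (it may instead be under /sys/kernel/debug/tracing) and using an arbitrary, purely illustrative record layout:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Arbitrary application-defined payload; trace_marker_raw stores the
 * written bytes verbatim, so the layout is entirely up to the writer. */
struct my_event {
	unsigned int	id;
	unsigned long	value;
};

int main(void)
{
	struct my_event ev = { .id = 42, .value = 1234 };
	int fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);

	if (fd < 0) {
		perror("open trace_marker_raw");
		return 1;
	}
	/* One write() produces one raw event in the ring buffer. */
	if (write(fd, &ev, sizeof(ev)) != (ssize_t)sizeof(ev))
		perror("write");
	close(fd);
	return 0;
}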
--- a/arch/powerpc/platforms/powernv/opal-tracepoints.c
+++ b/arch/powerpc/platforms/powernv/opal-tracepoints.c
@@ -6,9 +6,10 @@
 #ifdef HAVE_JUMP_LABEL
 struct static_key opal_tracepoint_key = STATIC_KEY_INIT;
 
-void opal_tracepoint_regfunc(void)
+int opal_tracepoint_regfunc(void)
 {
 	static_key_slow_inc(&opal_tracepoint_key);
+	return 0;
 }
 
 void opal_tracepoint_unregfunc(void)
@@ -25,9 +26,10 @@ void opal_tracepoint_unregfunc(void)
 /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
 extern long opal_tracepoint_refcount;
 
-void opal_tracepoint_regfunc(void)
+int opal_tracepoint_regfunc(void)
 {
 	opal_tracepoint_refcount++;
+	return 0;
 }
 
 void opal_tracepoint_unregfunc(void)
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -661,9 +661,10 @@ EXPORT_SYMBOL(arch_free_page);
 #ifdef HAVE_JUMP_LABEL
 struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;
 
-void hcall_tracepoint_regfunc(void)
+int hcall_tracepoint_regfunc(void)
 {
 	static_key_slow_inc(&hcall_tracepoint_key);
+	return 0;
 }
 
 void hcall_tracepoint_unregfunc(void)
@@ -680,9 +681,10 @@ void hcall_tracepoint_unregfunc(void)
 /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
 extern long hcall_tracepoint_refcount;
 
-void hcall_tracepoint_regfunc(void)
+int hcall_tracepoint_regfunc(void)
 {
 	hcall_tracepoint_refcount++;
+	return 0;
 }
 
 void hcall_tracepoint_unregfunc(void)
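The hunks above come from the "tracing: Have the reg function allow to fail" commit in this series: tracepoint registration callbacks now return int, so a registration failure can be propagated to the caller instead of being silently ignored. A minimal sketch of the resulting callback shape (the names here are hypothetical, not taken from this merge):

/* Hypothetical tracepoint reg/unreg pair in the post-4.10 style:
 * the reg function returns int so that registration can fail. */
static long my_tracepoint_refcount;

int my_tracepoint_regfunc(void)
{
	my_tracepoint_refcount++;
	return 0;	/* 0 on success; a negative errno aborts registration */
}

void my_tracepoint_unregfunc(void)
{
	my_tracepoint_refcount--;
}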