Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (162 commits)
  tracing/kprobes: unregister_trace_probe needs to be called under mutex
  perf: expose event__process function
  perf events: Fix mmap offset determination
  perf, powerpc: fsl_emb: Restore setting perf_sample_data.period
  perf, powerpc: Convert the FSL driver to use local64_t
  perf tools: Don't keep unreferenced maps when unmaps are detected
  perf session: Invalidate last_match when removing threads from rb_tree
  perf session: Free the ref_reloc_sym memory at the right place
  x86,mmiotrace: Add support for tracing STOS instruction
  perf, sched migration: Librarize task states and event headers helpers
  perf, sched migration: Librarize the GUI class
  perf, sched migration: Make the GUI class client agnostic
  perf, sched migration: Make it vertically scrollable
  perf, sched migration: Parameterize cpu height and spacing
  perf, sched migration: Fix key bindings
  perf, sched migration: Ignore unhandled task states
  perf, sched migration: Handle ignored migrate out events
  perf: New migration tool overview
  tracing: Drop cpparg() macro
  perf: Use tracepoint_synchronize_unregister() to flush any pending tracepoint call
  ...

Fix up trivial conflicts in Makefile and drivers/cpufreq/cpufreq.c
@@ -1,3 +1,8 @@
+/*
+ * Ftrace header.  For implementation details beyond the random comments
+ * scattered below, see: Documentation/trace/ftrace-design.txt
+ */
+
 #ifndef _LINUX_FTRACE_H
 #define _LINUX_FTRACE_H
 
@@ -11,8 +11,6 @@ struct trace_array;
 struct tracer;
 struct dentry;
 
-DECLARE_PER_CPU(struct trace_seq, ftrace_event_seq);
-
 struct trace_print_flags {
         unsigned long           mask;
         const char              *name;
@@ -58,6 +56,9 @@ struct trace_iterator {
         struct ring_buffer_iter *buffer_iter[NR_CPUS];
         unsigned long           iter_flags;
 
+        /* trace_seq for __print_flags() and __print_symbolic() etc. */
+        struct trace_seq        tmp_seq;
+
         /* The below is zeroed out in pipe_read */
         struct trace_seq        seq;
         struct trace_entry      *ent;
@@ -146,14 +147,19 @@ struct ftrace_event_class {
         int             (*raw_init)(struct ftrace_event_call *);
 };
 
+extern int ftrace_event_reg(struct ftrace_event_call *event,
+                            enum trace_reg type);
+
 enum {
         TRACE_EVENT_FL_ENABLED_BIT,
         TRACE_EVENT_FL_FILTERED_BIT,
+        TRACE_EVENT_FL_RECORDED_CMD_BIT,
 };
 
 enum {
-        TRACE_EVENT_FL_ENABLED  = (1 << TRACE_EVENT_FL_ENABLED_BIT),
-        TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
+        TRACE_EVENT_FL_ENABLED          = (1 << TRACE_EVENT_FL_ENABLED_BIT),
+        TRACE_EVENT_FL_FILTERED         = (1 << TRACE_EVENT_FL_FILTERED_BIT),
+        TRACE_EVENT_FL_RECORDED_CMD     = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
 };
 
 struct ftrace_event_call {
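The pair of enums in this hunk is the usual kernel flag idiom: the first enum names bit positions, the second derives the corresponding masks by shifting. A minimal standalone illustration (the DEMO_* names are invented for the example, not kernel identifiers):

#include <stdio.h>

enum { DEMO_ENABLED_BIT, DEMO_FILTERED_BIT, DEMO_RECORDED_CMD_BIT };

enum {
        DEMO_ENABLED      = (1 << DEMO_ENABLED_BIT),
        DEMO_FILTERED     = (1 << DEMO_FILTERED_BIT),
        DEMO_RECORDED_CMD = (1 << DEMO_RECORDED_CMD_BIT),
};

int main(void)
{
        unsigned int flags = 0;

        flags |= DEMO_ENABLED;                  /* set a flag */
        flags |= DEMO_RECORDED_CMD;

        if (flags & DEMO_ENABLED)               /* test a flag */
                printf("enabled, flags=0x%x\n", flags);

        flags &= ~DEMO_RECORDED_CMD;            /* clear a flag */
        return 0;
}

Adding a flag then only requires a new *_BIT entry; the mask stays mechanically derived and the two enums cannot drift apart.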
@@ -171,6 +177,7 @@ struct ftrace_event_call {
          * 32 bit flags:
          *   bit 1:             enabled
          *   bit 2:             filter_active
+         *   bit 3:             enabled cmd record
          *
          * Changes to flags must hold the event_mutex.
          *
@@ -257,8 +264,7 @@ static inline void
 perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
                       u64 count, struct pt_regs *regs, void *head)
 {
-        perf_tp_event(addr, count, raw_data, size, regs, head);
-        perf_swevent_put_recursion_context(rctx);
+        perf_tp_event(addr, count, raw_data, size, regs, head, rctx);
 }
 #endif
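This hunk transfers ownership of the recursion context: perf_tp_event() now receives rctx and releases it itself, so callers no longer have to pair every submit with perf_swevent_put_recursion_context(). A userspace sketch of that ownership-transfer pattern (names and the per-thread stand-in are illustrative, not the kernel's implementation):

#include <stdio.h>

static __thread int recursion_depth;    /* per-thread stand-in for per-CPU state */

static int get_recursion_context(void)
{
        if (recursion_depth > 0)
                return -1;              /* already inside: refuse to recurse */
        recursion_depth++;
        return 0;                       /* the "rctx" handle */
}

static void put_recursion_context(int rctx)
{
        (void)rctx;
        recursion_depth--;
}

/* New-style consumer: takes rctx and releases it itself, as perf_tp_event()
 * does after this merge, so no caller can leak the context on an early return. */
static void consume_event(const char *record, int rctx)
{
        printf("event: %s\n", record);
        put_recursion_context(rctx);
}

int main(void)
{
        int rctx = get_recursion_context();

        if (rctx < 0)
                return 1;
        consume_event("sample", rctx);  /* ownership of rctx moves here */
        return 0;
}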
@@ -513,9 +513,6 @@ extern void tracing_start(void);
 extern void tracing_stop(void);
 extern void ftrace_off_permanent(void);
 
-extern void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
-
 static inline void __attribute__ ((format (printf, 1, 2)))
 ____trace_printk_check_format(const char *fmt, ...)
 {
@@ -591,8 +588,6 @@ __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
 
 extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
 #else
-static inline void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
 static inline int
 trace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
 
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2008 Eduard - Gabriel Munteanu
- *
- * This file is released under GPL version 2.
- */
-
-#ifndef _LINUX_KMEMTRACE_H
-#define _LINUX_KMEMTRACE_H
-
-#ifdef __KERNEL__
-
-#include <trace/events/kmem.h>
-
-#ifdef CONFIG_KMEMTRACE
-extern void kmemtrace_init(void);
-#else
-static inline void kmemtrace_init(void)
-{
-}
-#endif
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_KMEMTRACE_H */
-
@@ -20,10 +20,14 @@ extern void touch_nmi_watchdog(void);
 extern void acpi_nmi_disable(void);
 extern void acpi_nmi_enable(void);
 #else
+#ifndef CONFIG_HARDLOCKUP_DETECTOR
 static inline void touch_nmi_watchdog(void)
 {
         touch_softlockup_watchdog();
 }
+#else
+extern void touch_nmi_watchdog(void);
+#endif
 static inline void acpi_nmi_disable(void) { }
 static inline void acpi_nmi_enable(void) { }
 #endif
@@ -47,4 +51,13 @@ static inline bool trigger_all_cpu_backtrace(void)
 }
 #endif
 
+#ifdef CONFIG_LOCKUP_DETECTOR
+int hw_nmi_is_cpu_stuck(struct pt_regs *);
+u64 hw_nmi_get_sample_period(void);
+extern int watchdog_enabled;
+struct ctl_table;
+extern int proc_dowatchdog_enabled(struct ctl_table *, int ,
+                        void __user *, size_t *, loff_t *);
+#endif
+
 #endif
@@ -214,8 +214,9 @@ struct perf_event_attr {
                                  *  See also PERF_RECORD_MISC_EXACT_IP
                                  */
                                 precise_ip     :  2, /* skid constraint       */
+                                mmap_data      :  1, /* non-exec mmap data    */
 
-                                __reserved_1   : 47;
+                                __reserved_1   : 46;
 
         union {
                 __u32           wakeup_events;    /* wakeup every n events */
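The new mmap_data bit asks the kernel to emit PERF_RECORD_MMAP events for non-exec (data) mappings as well; note how it is carved out of __reserved_1, which shrinks from 47 to 46 bits. A minimal userspace sketch of opening such a counter (assumes a kernel with this merge applied; error handling trimmed):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_CPU_CLOCK;
        attr.sample_type = PERF_SAMPLE_IP;
        attr.mmap = 1;                  /* exec mmaps, as before */
        attr.mmap_data = 1;             /* new in this merge: data mmaps too */

        /* perf_event_open has no glibc wrapper; invoke the syscall directly */
        fd = syscall(__NR_perf_event_open, &attr, 0 /* pid: self */,
                     -1 /* any cpu */, -1 /* no group */, 0 /* flags */);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }
        close(fd);
        return 0;
}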
@@ -461,6 +462,7 @@ enum perf_callchain_context {
 
 #ifdef CONFIG_PERF_EVENTS
 # include <asm/perf_event.h>
+# include <asm/local64.h>
 #endif
 
 struct perf_guest_info_callbacks {
@@ -531,14 +533,16 @@ struct hw_perf_event {
                 struct hrtimer  hrtimer;
         };
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
-        /* breakpoint */
-        struct arch_hw_breakpoint       info;
+        struct { /* breakpoint */
+                struct arch_hw_breakpoint       info;
+                struct list_head                bp_list;
+        };
 #endif
         };
-        atomic64_t                      prev_count;
+        local64_t                       prev_count;
         u64                             sample_period;
         u64                             last_period;
-        atomic64_t                      period_left;
+        local64_t                       period_left;
         u64                             interrupts;
 
         u64                             freq_time_stamp;
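prev_count and period_left switch from atomic64_t to local64_t here (matching the asm/local64.h include added above): these fields are only ever written from the CPU that owns the event, so lock-prefixed atomic operations are unnecessary overhead. A kernel-side sketch of the API, assuming a module context; example_count is an illustrative symbol, not from this commit:

#include <linux/types.h>
#include <asm/local64.h>

static local64_t example_count;         /* illustrative, not a kernel symbol */

static void add_on_owner_cpu(u64 delta)
{
        /* plain add suffices: only the owning CPU ever writes this */
        local64_add(delta, &example_count);
}

static s64 read_from_anywhere(void)
{
        /* reads are tear-free; the value may be momentarily stale */
        return local64_read(&example_count);
}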
@@ -548,7 +552,10 @@ struct hw_perf_event {
 
 struct perf_event;
 
-#define PERF_EVENT_TXN_STARTED 1
+/*
+ * Common implementation detail of pmu::{start,commit,cancel}_txn
+ */
+#define PERF_EVENT_TXN 0x1
 
 /**
  * struct pmu - generic performance monitoring unit
@@ -562,14 +569,28 @@ struct pmu {
         void (*unthrottle)              (struct perf_event *event);
 
         /*
-         * group events scheduling is treated as a transaction,
-         * add group events as a whole and perform one schedulability test.
-         * If test fails, roll back the whole group
+         * Group events scheduling is treated as a transaction, add group
+         * events as a whole and perform one schedulability test. If the test
+         * fails, roll back the whole group
          */
+
+        /*
+         * Start the transaction, after this ->enable() doesn't need
+         * to do schedulability tests.
+         */
         void (*start_txn)               (const struct pmu *pmu);
-        void (*cancel_txn)              (const struct pmu *pmu);
+        /*
+         * If ->start_txn() disabled the ->enable() schedulability test
+         * then ->commit_txn() is required to perform one. On success
+         * the transaction is closed. On error the transaction is kept
+         * open until ->cancel_txn() is called.
+         */
         int (*commit_txn)               (const struct pmu *pmu);
+        /*
+         * Will cancel the transaction, assumes ->disable() is called for
+         * each successfull ->enable() during the transaction.
+         */
+        void (*cancel_txn)              (const struct pmu *pmu);
 };
 
 /**
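The reordered and documented ->start_txn/->commit_txn/->cancel_txn hooks treat group scheduling as a transaction: enables are batched, one schedulability test runs at commit, and failure rolls the whole group back. A standalone mock of that protocol (all names and the two-counter model are invented for the demo, not the kernel's implementation):

#include <stdio.h>

#define NCOUNTERS 2                     /* pretend the PMU has two counters */

struct mock_pmu {
        int in_txn;
        int used;                       /* counters claimed so far */
};

static void start_txn(struct mock_pmu *p)
{
        p->in_txn = 1;                  /* ->enable() skips per-event tests */
}

static void event_enable(struct mock_pmu *p)
{
        p->used++;                      /* no schedulability test inside a txn */
}

static int commit_txn(struct mock_pmu *p)
{
        if (p->used > NCOUNTERS)
                return -1;              /* error: txn stays open, must cancel */
        p->in_txn = 0;                  /* success closes the transaction */
        return 0;
}

static void cancel_txn(struct mock_pmu *p, int enabled)
{
        p->used -= enabled;             /* undo each successful enable */
        p->in_txn = 0;
}

int main(void)
{
        struct mock_pmu pmu = { 0, 0 };
        int i, n = 3;                   /* a 3-event group cannot fit */

        start_txn(&pmu);
        for (i = 0; i < n; i++)
                event_enable(&pmu);
        if (commit_txn(&pmu)) {
                cancel_txn(&pmu, n);
                printf("group rejected, rolled back (used=%d)\n", pmu.used);
        } else {
                printf("group scheduled (used=%d)\n", pmu.used);
        }
        return 0;
}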
@@ -584,7 +605,9 @@ enum perf_event_active_state {
 
 struct file;
 
-struct perf_mmap_data {
+#define PERF_BUFFER_WRITABLE            0x01
+
+struct perf_buffer {
         atomic_t                        refcount;
         struct rcu_head                 rcu_head;
 #ifdef CONFIG_PERF_USE_VMALLOC
@@ -650,7 +673,8 @@ struct perf_event {
 
         enum perf_event_active_state    state;
         unsigned int                    attach_state;
-        atomic64_t                      count;
+        local64_t                       count;
         atomic64_t                      child_count;
 
         /*
          * These are the total time in nanoseconds that the event
@@ -709,7 +733,7 @@ struct perf_event {
         atomic_t                        mmap_count;
         int                             mmap_locked;
         struct user_struct              *mmap_user;
-        struct perf_mmap_data           *data;
+        struct perf_buffer              *buffer;
 
         /* poll related */
         wait_queue_head_t               waitq;
@@ -807,7 +831,7 @@ struct perf_cpu_context {
 
 struct perf_output_handle {
         struct perf_event               *event;
-        struct perf_mmap_data           *data;
+        struct perf_buffer              *buffer;
         unsigned long                   wakeup;
         unsigned long                   size;
         void                            *addr;
@@ -910,8 +934,10 @@ extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
 
-extern void
-perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
+#ifndef perf_arch_fetch_caller_regs
+static inline void
+perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
+#endif
 
 /*
  * Take a snapshot of the regs. Skip ip and frame pointer to
@@ -921,31 +947,11 @@ perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
  * - bp for callchains
  * - eflags, for future purposes, just in case
  */
-static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip)
+static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 {
-        unsigned long ip;
-
         memset(regs, 0, sizeof(*regs));
 
-        switch (skip) {
-        case 1 :
-                ip = CALLER_ADDR0;
-                break;
-        case 2 :
-                ip = CALLER_ADDR1;
-                break;
-        case 3 :
-                ip = CALLER_ADDR2;
-                break;
-        case 4:
-                ip = CALLER_ADDR3;
-                break;
-        /* No need to support further for now */
-        default:
-                ip = 0;
-        }
-
-        return perf_arch_fetch_caller_regs(regs, ip, skip);
+        perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
 }
 
 static inline void
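The skip-level switch is gone: perf_fetch_caller_regs() now always captures exactly one caller level via CALLER_ADDR0, which in GCC terms is essentially __builtin_return_address(0). A runnable userspace sketch of that one-level capture (fetch_caller_ip is an invented name for the demo):

#include <stdio.h>

static void __attribute__((noinline)) fetch_caller_ip(void)
{
        void *ip = __builtin_return_address(0); /* ip inside our caller */

        printf("called from %p\n", ip);
}

int main(void)
{
        fetch_caller_ip();
        return 0;
}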
@@ -955,21 +961,14 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
         struct pt_regs hot_regs;
 
         if (atomic_read(&perf_swevent_enabled[event_id])) {
                 if (!regs) {
-                        perf_fetch_caller_regs(&hot_regs, 1);
+                        perf_fetch_caller_regs(&hot_regs);
                         regs = &hot_regs;
                 }
                 __perf_sw_event(event_id, nr, nmi, regs, addr);
         }
 }
 
-extern void __perf_event_mmap(struct vm_area_struct *vma);
-
-static inline void perf_event_mmap(struct vm_area_struct *vma)
-{
-        if (vma->vm_flags & VM_EXEC)
-                __perf_event_mmap(vma);
-}
-
+extern void perf_event_mmap(struct vm_area_struct *vma);
 extern struct perf_guest_info_callbacks *perf_guest_cbs;
 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
 extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
@@ -1001,7 +1000,7 @@ static inline bool perf_paranoid_kernel(void)
 extern void perf_event_init(void);
 extern void perf_tp_event(u64 addr, u64 count, void *record,
                           int entry_size, struct pt_regs *regs,
-                          struct hlist_head *head);
+                          struct hlist_head *head, int rctx);
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
@@ -316,20 +316,16 @@ extern void scheduler_tick(void);
 
 extern void sched_show_task(struct task_struct *p);
 
-#ifdef CONFIG_DETECT_SOFTLOCKUP
-extern void softlockup_tick(void);
+#ifdef CONFIG_LOCKUP_DETECTOR
 extern void touch_softlockup_watchdog(void);
 extern void touch_softlockup_watchdog_sync(void);
 extern void touch_all_softlockup_watchdogs(void);
-extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
-                                    void __user *buffer,
-                                    size_t *lenp, loff_t *ppos);
+extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
+                                  void __user *buffer,
+                                  size_t *lenp, loff_t *ppos);
 extern unsigned int softlockup_panic;
 extern int softlockup_thresh;
 #else
-static inline void softlockup_tick(void)
-{
-}
 static inline void touch_softlockup_watchdog(void)
 {
 }
@@ -2435,18 +2431,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_TRACING
-extern void
-__trace_special(void *__tr, void *__data,
-                unsigned long arg1, unsigned long arg2, unsigned long arg3);
-#else
-static inline void
-__trace_special(void *__tr, void *__data,
-                unsigned long arg1, unsigned long arg2, unsigned long arg3)
-{
-}
-#endif
-
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
@@ -14,7 +14,8 @@
 #include <asm/page.h>           /* kmalloc_sizes.h needs PAGE_SIZE */
 #include <asm/cache.h>          /* kmalloc_sizes.h needs L1_CACHE_BYTES */
 #include <linux/compiler.h>
-#include <linux/kmemtrace.h>
+
+#include <trace/events/kmem.h>
 
 #ifndef ARCH_KMALLOC_MINALIGN
 /*
@@ -10,9 +10,10 @@
 #include <linux/gfp.h>
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
-#include <linux/kmemtrace.h>
 #include <linux/kmemleak.h>
 
+#include <trace/events/kmem.h>
+
 enum stat_item {
         ALLOC_FASTPATH,         /* Allocation from cpu slab */
         ALLOC_SLOWPATH,         /* Allocation by getting a new cpu slab */
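With <linux/kmemtrace.h> deleted, slab allocation tracing flows through the generic kmem tracepoints pulled in by the two hunks above. A sketch of enabling them from userspace through the tracing filesystem (the debugfs mount point varies by system; root required):

#include <stdio.h>

int main(void)
{
        const char *path = "/sys/kernel/debug/tracing/events/kmem/enable";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return 1;
        }
        fputs("1\n", f);        /* writing "0" disables the events again */
        fclose(f);
        return 0;
}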
@@ -167,7 +167,6 @@ extern struct trace_event_functions exit_syscall_print_funcs;
                 .enter_event    = &event_enter_##sname,         \
                 .exit_event     = &event_exit_##sname,          \
                 .enter_fields   = LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \
-                .exit_fields    = LIST_HEAD_INIT(__syscall_meta_##sname.exit_fields), \
         };
 
 #define SYSCALL_DEFINE0(sname)                                  \
@@ -182,7 +181,6 @@ extern struct trace_event_functions exit_syscall_print_funcs;
                 .enter_event    = &event_enter__##sname,        \
                 .exit_event     = &event_exit__##sname,         \
                 .enter_fields   = LIST_HEAD_INIT(__syscall_meta__##sname.enter_fields), \
-                .exit_fields    = LIST_HEAD_INIT(__syscall_meta__##sname.exit_fields), \
         };                                                      \
         asmlinkage long sys_##sname(void)
 #else