Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
 "Fix merge window fallout and fix sleep profiling (this was always
  broken, so it's not a fix for the merge window - we can skip this one
  from the head of the tree)."

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/trace: Add ability to set a target task for events
  perf/x86: Fix USER/KERNEL tagging of samples properly
  perf/x86/intel/uncore: Make UNCORE_PMU_HRTIMER_INTERVAL 64-bit
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -32,6 +32,8 @@
 #include <asm/smp.h>
 #include <asm/alternative.h>
 #include <asm/timer.h>
+#include <asm/desc.h>
+#include <asm/ldt.h>
 
 #include "perf_event.h"
 
@@ -1738,6 +1740,29 @@ valid_user_frame(const void __user *fp, unsigned long size)
 	return (__range_not_ok(fp, size, TASK_SIZE) == 0);
 }
 
+static unsigned long get_segment_base(unsigned int segment)
+{
+	struct desc_struct *desc;
+	int idx = segment >> 3;
+
+	if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
+		if (idx > LDT_ENTRIES)
+			return 0;
+
+		if (idx > current->active_mm->context.size)
+			return 0;
+
+		desc = current->active_mm->context.ldt;
+	} else {
+		if (idx > GDT_ENTRIES)
+			return 0;
+
+		desc = __this_cpu_ptr(&gdt_page.gdt[0]);
+	}
+
+	return get_desc_base(desc + idx);
+}
+
 #ifdef CONFIG_COMPAT
 
 #include <asm/compat.h>
@@ -1746,13 +1771,17 @@ static inline int
 perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 {
 	/* 32-bit process in 64-bit kernel. */
+	unsigned long ss_base, cs_base;
 	struct stack_frame_ia32 frame;
 	const void __user *fp;
 
 	if (!test_thread_flag(TIF_IA32))
 		return 0;
 
-	fp = compat_ptr(regs->bp);
+	cs_base = get_segment_base(regs->cs);
+	ss_base = get_segment_base(regs->ss);
+
+	fp = compat_ptr(ss_base + regs->bp);
 	while (entry->nr < PERF_MAX_STACK_DEPTH) {
 		unsigned long bytes;
 		frame.next_frame     = 0;
@@ -1765,8 +1794,8 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 		if (!valid_user_frame(fp, sizeof(frame)))
 			break;
 
-		perf_callchain_store(entry, frame.return_address);
-		fp = compat_ptr(frame.next_frame);
+		perf_callchain_store(entry, cs_base + frame.return_address);
+		fp = compat_ptr(ss_base + frame.next_frame);
 	}
 	return 1;
 }
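Note on the hunk above: each IA32 stack frame starts with the caller's saved EBP followed by the return address, and both values are effective (segment-relative) addresses — that is why the patch folds ss_base into the frame pointer and cs_base into the stored IP. Below is a minimal user-space sketch of the same walk, not part of the patch, using a fabricated three-frame stack and flat (zero-base) segments; the memcpy() stands in for the kernel's copy_from_user_nmi():

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct stack_frame_ia32 {
		uint32_t next_frame;		/* saved EBP: effective address of the caller's frame */
		uint32_t return_address;	/* effective EIP to report */
	};

	int main(void)
	{
		uint8_t stack[48];
		struct stack_frame_ia32 f0 = { 16, 0x1000 };	/* frame at offset 0, caller frame at 16 */
		struct stack_frame_ia32 f1 = { 32, 0x2000 };	/* frame at offset 16, caller frame at 32 */
		struct stack_frame_ia32 f2 = { 0, 0 };		/* terminator */

		memset(stack, 0, sizeof(stack));
		memcpy(stack +  0, &f0, sizeof(f0));
		memcpy(stack + 16, &f1, sizeof(f1));
		memcpy(stack + 32, &f2, sizeof(f2));

		uint32_t ss_base = 0, cs_base = 0;	/* flat segments: both bases zero */
		uint32_t fp = 0;			/* effective EBP, as found in pt_regs */

		for (;;) {
			struct stack_frame_ia32 frame;

			/* stands in for copy_from_user_nmi(&frame, fp, sizeof(frame)) */
			memcpy(&frame, stack + ss_base + fp, sizeof(frame));
			if (!frame.return_address)
				break;
			/* what perf_callchain_store() records */
			printf("ip = %#x\n", cs_base + frame.return_address);
			fp = frame.next_frame;
		}
		return 0;
	}

With flat segments the bases are zero and behavior is unchanged; the bases only matter for non-zero-based LDT segments (e.g. as set up by Wine), where they turn the effective addresses into linear ones.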
@@ -1789,6 +1818,12 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 		return;
 	}
 
+	/*
+	 * We don't know what to do with VM86 stacks.. ignore them for now.
+	 */
+	if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
+		return;
+
 	fp = (void __user *)regs->bp;
 
 	perf_callchain_store(entry, regs->ip);
@@ -1816,16 +1851,50 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 	}
 }
 
+/*
+ * Deal with code segment offsets for the various execution modes:
+ *
+ *   VM86 - the good olde 16 bit days, where the linear address is
+ *          20 bits and we use regs->ip + 0x10 * regs->cs.
+ *
+ *   IA32 - Where we need to look at GDT/LDT segment descriptor tables
+ *          to figure out what the 32bit base address is.
+ *
+ *    X32 - has TIF_X32 set, but is running in x86_64
+ *
+ * X86_64 - CS,DS,SS,ES are all zero based.
+ */
+static unsigned long code_segment_base(struct pt_regs *regs)
+{
+	/*
+	 * If we are in VM86 mode, add the segment offset to convert to a
+	 * linear address.
+	 */
+	if (regs->flags & X86_VM_MASK)
+		return 0x10 * regs->cs;
+
+	/*
+	 * For IA32 we look at the GDT/LDT segment base to convert the
+	 * effective IP to a linear address.
+	 */
+#ifdef CONFIG_X86_32
+	if (user_mode(regs) && regs->cs != __USER_CS)
+		return get_segment_base(regs->cs);
+#else
+	if (test_thread_flag(TIF_IA32)) {
+		if (user_mode(regs) && regs->cs != __USER32_CS)
+			return get_segment_base(regs->cs);
+	}
+#endif
+	return 0;
+}
+
 unsigned long perf_instruction_pointer(struct pt_regs *regs)
 {
-	unsigned long ip;
-
 	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
-		ip = perf_guest_cbs->get_guest_ip();
-	else
-		ip = instruction_pointer(regs);
+		return perf_guest_cbs->get_guest_ip();
 
-	return ip;
+	return regs->ip + code_segment_base(regs);
 }
 
 unsigned long perf_misc_flags(struct pt_regs *regs)
@@ -1838,7 +1907,7 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
 		else
 			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
 	} else {
-		if (!kernel_ip(regs->ip))
+		if (user_mode(regs))
 			misc |= PERF_RECORD_MISC_USER;
 		else
 			misc |= PERF_RECORD_MISC_KERNEL;
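For context on get_segment_base() above: an x86 segment selector packs three fields — bits 15..3 index a descriptor table, bit 2 (TI) selects GDT vs. LDT, and bits 1..0 carry the requested privilege level. A small stand-alone sketch of that decoding, not part of the patch (the masks match asm/segment.h; the selector values are illustrative):

	#include <stdio.h>

	#define SEGMENT_RPL_MASK	0x3	/* bits 1..0: requested privilege level */
	#define SEGMENT_TI_MASK		0x4	/* bit 2: table indicator */
	#define SEGMENT_LDT		0x4	/* TI set: selector indexes the LDT */

	int main(void)
	{
		/* 0x33/0x23 are the x86_64 __USER_CS/__USER32_CS GDT selectors;
		 * 0x07 is what an LDT-based code segment (e.g. Wine's) can look like. */
		unsigned int selectors[] = { 0x33, 0x23, 0x07 };

		for (unsigned int i = 0; i < 3; i++) {
			unsigned int sel = selectors[i];

			printf("selector %#04x: index=%u table=%s rpl=%u\n",
			       sel, sel >> 3,
			       (sel & SEGMENT_TI_MASK) == SEGMENT_LDT ? "LDT" : "GDT",
			       sel & SEGMENT_RPL_MASK);
		}
		return 0;
	}

This is also why the last hunk switches perf_misc_flags() from !kernel_ip(regs->ip) to user_mode(regs), a CS-based check: in VM86 or non-flat 32-bit code, the raw IP alone cannot reliably classify a sample as user or kernel.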
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -516,6 +516,26 @@ static inline bool kernel_ip(unsigned long ip)
 #endif
 }
 
+/*
+ * Not all PMUs provide the right context information to place the reported IP
+ * into full context. Specifically segment registers are typically not
+ * supplied.
+ *
+ * Assuming the address is a linear address (it is for IBS), we fake the CS and
+ * vm86 mode using the known zero-based code segment and 'fix up' the registers
+ * to reflect this.
+ *
+ * Intel PEBS/LBR appear to typically provide the effective address, nothing
+ * much we can do about that but pray and treat it like a linear address.
+ */
+static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
+{
+	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
+	if (regs->flags & X86_VM_MASK)
+		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
+	regs->ip = ip;
+}
+
 #ifdef CONFIG_CPU_SUP_AMD
 
 int amd_pmu_init(void);
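The XOR in set_linear_ip() is the subtle part: when regs->flags has X86_VM_MASK set (and PERF_EFLAGS_VM, a software-defined bit, is clear), XOR-ing with both masks clears the hardware VM86 flag and sets the software one in a single operation — perf remembers the sample came from vm86 without later code treating the regs as still being in vm86 mode. A standalone trace of that flag swap, not from the patch (the PERF_EFLAGS_VM bit position is an assumption for illustration):

	#include <stdio.h>

	#define X86_VM_MASK	(1UL << 17)	/* hardware EFLAGS.VM */
	#define PERF_EFLAGS_VM	(1UL << 5)	/* software bit in a reserved EFLAGS position (assumed value) */

	int main(void)
	{
		unsigned long flags = 0x202UL | X86_VM_MASK;	/* a vm86-mode EFLAGS image */

		/* the swap from set_linear_ip(): clears VM, sets PERF_EFLAGS_VM */
		if (flags & X86_VM_MASK)
			flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);

		printf("VM=%d PERF_VM=%d\n",
		       !!(flags & X86_VM_MASK), !!(flags & PERF_EFLAGS_VM));	/* VM=0 PERF_VM=1 */
		return 0;
	}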
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -13,6 +13,8 @@
 
 #include <asm/apic.h>
 
+#include "perf_event.h"
+
 static u32 ibs_caps;
 
 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
@@ -536,7 +538,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
 	if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
 		regs.flags &= ~PERF_EFLAGS_EXACT;
 	} else {
-		instruction_pointer_set(&regs, ibs_data.regs[1]);
+		set_linear_ip(&regs, ibs_data.regs[1]);
 		regs.flags |= PERF_EFLAGS_EXACT;
 	}
 
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -499,7 +499,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
 	 * We sampled a branch insn, rewind using the LBR stack
 	 */
 	if (ip == to) {
-		regs->ip = from;
+		set_linear_ip(regs, from);
 		return 1;
 	}
 
@@ -529,7 +529,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
 	} while (to < ip);
 
 	if (to == ip) {
-		regs->ip = old_to;
+		set_linear_ip(regs, old_to);
 		return 1;
 	}
 
@@ -569,7 +569,8 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
 	 * A possible PERF_SAMPLE_REGS will have to transfer all regs.
 	 */
 	regs = *iregs;
-	regs.ip = pebs->ip;
+	regs.flags = pebs->flags;
+	set_linear_ip(&regs, pebs->ip);
 	regs.bp = pebs->bp;
 	regs.sp = pebs->sp;
 
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -5,7 +5,7 @@
 #include "perf_event.h"
 
 #define UNCORE_PMU_NAME_LEN		32
-#define UNCORE_PMU_HRTIMER_INTERVAL	(60 * NSEC_PER_SEC)
+#define UNCORE_PMU_HRTIMER_INTERVAL	(60LL * NSEC_PER_SEC)
 
 #define UNCORE_FIXED_EVENT		0xff
 #define UNCORE_PMC_IDX_MAX_GENERIC	8
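The LL suffix matters because NSEC_PER_SEC is a plain long: on an ILP32 build, 60 * NSEC_PER_SEC is evaluated in 32-bit arithmetic and wraps before the result ever reaches the 64-bit hrtimer interface. A small demonstration, not part of the patch, forcing the 32-bit behavior with explicit types so it shows the wrap even on a 64-bit host:

	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_SEC 1000000000L

	int main(void)
	{
		/* emulate a 32-bit long: the product wraps modulo 2^32 before widening */
		int64_t broken = (int32_t)(60u * (uint32_t)NSEC_PER_SEC);
		int64_t fixed  = 60LL * NSEC_PER_SEC;	/* the fix: force a 64-bit multiply */

		printf("broken = %lld ns\n", (long long)broken);	/* -129542144: a negative interval */
		printf("fixed  = %lld ns\n", (long long)fixed);		/* 60000000000 */
		return 0;
	}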