Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
 "Misc race fixes uncovered by fuzzing efforts, a Sparse fix, two PMU
  driver fixes, plus miscellaneous tooling fixes"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86: Reject non sampling events with precise_ip
  perf/x86/intel: Account interrupts for PEBS errors
  perf/core: Fix concurrent sys_perf_event_open() vs. 'move_group' race
  perf/core: Fix sys_perf_event_open() vs. hotplug
  perf/x86/intel: Use ULL constant to prevent undefined shift behaviour
  perf/x86/intel/uncore: Fix hardcoded socket 0 assumption in the Haswell init code
  perf/x86: Set pmu->module in Intel PMU modules
  perf probe: Fix to probe on gcc generated symbols for offline kernel
  perf probe: Fix --funcs to show correct symbols for offline module
  perf symbols: Robustify reading of build-id from sysfs
  perf tools: Install tools/lib/traceevent plugins with install-bin
  tools lib traceevent: Fix prev/next_prio for deadline tasks
  perf record: Fix --switch-output documentation and comment
  perf record: Make __record_options static
  tools lib subcmd: Add OPT_STRING_OPTARG_SET option
  perf probe: Fix to get correct modname from elf header
  samples/bpf trace_output_user: Remove duplicate sys/ioctl.h include
  samples/bpf sock_example: Avoid getting ethhdr from two includes
  perf sched timehist: Show total scheduling time
@@ -505,6 +505,10 @@ int x86_pmu_hw_config(struct perf_event *event)
 
 		if (event->attr.precise_ip > precise)
 			return -EOPNOTSUPP;
+
+		/* There's no sense in having PEBS for non sampling events: */
+		if (!is_sampling_event(event))
+			return -EINVAL;
 	}
 	/*
 	 * check that PEBS LBR correction does not conflict with
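For context, a minimal user-space sketch of the behaviour the hunk above enforces (the event choice and error handling are illustrative, not part of the patch): requesting precise_ip on a counting-only event, i.e. one with no sample_period or sample_freq, is now rejected with EINVAL instead of silently configuring PEBS.

/* Sketch: precise_ip without sampling is now refused by perf_event_open(). */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.precise_ip = 1;		/* ask for PEBS-assisted precision ... */
	attr.sample_period = 0;		/* ... but do not make it a sampling event */

	/* With this fix the kernel returns -EINVAL for this combination. */
	long fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		perror("perf_event_open");
	else
		close(fd);
	return 0;
}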
@@ -3987,7 +3987,7 @@ __init int intel_pmu_init(void)
 		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
 		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
 	}
-	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
+	x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;
 
 	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
 		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
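A standalone illustration of the shift issue fixed above (values are an example only, assuming num_counters can be clipped to INTEL_PMC_MAX_GENERIC, i.e. 32): shifting a plain int constant by 32 bits is undefined behaviour in C, while the ULL constant makes it a well-defined 64-bit shift.

/* Sketch only: why the ULL suffix matters once the shift count can reach 32. */
#include <stdio.h>

int main(void)
{
	unsigned int num_counters = 32;	/* e.g. clipped to INTEL_PMC_MAX_GENERIC */

	/* 64-bit shift: well defined, yields 0xffffffff. */
	unsigned long long mask = (1ULL << num_counters) - 1;

	/* The old form, (1 << num_counters) - 1, shifts a 32-bit int by its
	 * full width, which is undefined behaviour. */
	printf("intel_ctrl mask = %#llx\n", mask);
	return 0;
}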
@@ -434,6 +434,7 @@ static struct pmu cstate_core_pmu = {
 	.stop		= cstate_pmu_event_stop,
 	.read		= cstate_pmu_event_update,
 	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
+	.module		= THIS_MODULE,
 };
 
 static struct pmu cstate_pkg_pmu = {
@@ -447,6 +448,7 @@ static struct pmu cstate_pkg_pmu = {
 	.stop		= cstate_pmu_event_stop,
 	.read		= cstate_pmu_event_update,
 	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
+	.module		= THIS_MODULE,
 };
 
 static const struct cstate_model nhm_cstates __initconst = {
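The .module lines added here (and in the RAPL and uncore hunks further down) let the perf core take a reference on the owning module while events are live, so the module cannot be unloaded underneath them. A minimal, hypothetical kernel-module sketch of the pattern (the names and the do-nothing event_init are illustrative, not part of the patch):

/* Skeleton modular PMU: .module is what the hunks above add to the real drivers. */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/perf_event.h>

static int demo_event_init(struct perf_event *event)
{
	return -ENOENT;		/* decline every event; this is only a skeleton */
}

static struct pmu demo_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= demo_event_init,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
	.module		= THIS_MODULE,	/* pins the module while events exist */
};

static int __init demo_init(void)
{
	return perf_pmu_register(&demo_pmu, "demo_pmu", -1);
}

static void __exit demo_exit(void)
{
	perf_pmu_unregister(&demo_pmu);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");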
@@ -1389,9 +1389,13 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 			continue;
 
 		/* log dropped samples number */
-		if (error[bit])
+		if (error[bit]) {
 			perf_log_lost_samples(event, error[bit]);
 
+			if (perf_event_account_interrupt(event))
+				x86_pmu_stop(event, 0);
+		}
+
 		if (counts[bit]) {
 			__intel_pmu_pebs_event(event, iregs, base,
 					       top, bit, counts[bit]);
@@ -697,6 +697,7 @@ static int __init init_rapl_pmus(void)
 	rapl_pmus->pmu.start		= rapl_pmu_event_start;
 	rapl_pmus->pmu.stop		= rapl_pmu_event_stop;
 	rapl_pmus->pmu.read		= rapl_pmu_event_read;
+	rapl_pmus->pmu.module		= THIS_MODULE;
 	return 0;
 }
 
@@ -733,6 +733,7 @@ static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
 			.start		= uncore_pmu_event_start,
 			.stop		= uncore_pmu_event_stop,
 			.read		= uncore_pmu_event_read,
+			.module		= THIS_MODULE,
 		};
 	} else {
 		pmu->pmu = *pmu->type->pmu;
@@ -2686,7 +2686,7 @@ static struct intel_uncore_type *hswep_msr_uncores[] = {
 
 void hswep_uncore_cpu_init(void)
 {
-	int pkg = topology_phys_to_logical_pkg(0);
+	int pkg = boot_cpu_data.logical_proc_id;
 
 	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
 		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;