Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
 "Two fixes on the kernel side: fix an over-eager condition that failed
  larger perf ring-buffer sizes, plus fix crashes in the Intel BTS code
  for a corner case, found by fuzzing"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/core: Fix impossible ring-buffer sizes warning
  perf/x86: Add check_period PMU callback
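For context (not part of the commit): the BTS corner case is reachable from userspace through the PERF_EVENT_IOC_PERIOD ioctl. A minimal reproducer sketch, assuming an Intel CPU with BTS and using only the standard perf_event_open(2) interface:

/*
 * Hypothetical reproducer sketch, not from the commit: open a
 * branch-instructions sampling event with a harmless period, then
 * shrink the period to 1 via ioctl. Period 1 on this event is what
 * selects BTS; with the new check_period callback the ioctl is
 * refused with EINVAL instead of reconfiguring a running event.
 */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t period = 1;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
	attr.sample_period = 1000;	/* valid, non-BTS period */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* On a patched kernel this fails with EINVAL. */
	if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &period))
		perror("PERF_EVENT_IOC_PERIOD");

	close(fd);
	return 0;
}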
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2278,6 +2278,19 @@ void perf_check_microcode(void)
 		x86_pmu.check_microcode();
 }
 
+static int x86_pmu_check_period(struct perf_event *event, u64 value)
+{
+	if (x86_pmu.check_period && x86_pmu.check_period(event, value))
+		return -EINVAL;
+
+	if (value && x86_pmu.limit_period) {
+		if (x86_pmu.limit_period(event, value) > value)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
 static struct pmu pmu = {
 	.pmu_enable		= x86_pmu_enable,
 	.pmu_disable		= x86_pmu_disable,
@@ -2302,6 +2315,7 @@ static struct pmu pmu = {
 	.event_idx		= x86_pmu_event_idx,
 	.sched_task		= x86_pmu_sched_task,
 	.task_ctx_size		= sizeof(struct x86_perf_task_context),
+	.check_period		= x86_pmu_check_period,
 };
 
 void arch_perf_update_userpage(struct perf_event *event,
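The hunks above show only the x86 side. The generic perf core invokes the new hook before committing a period change; a sketch of the assumed core-side dispatch (the function name and the generic-core hunk are not part of this excerpt, so treat this as an assumption that the plumbing mirrors the other struct pmu callbacks):

/* Sketch of the assumed generic-core dispatch, not shown in this diff. */
static int perf_event_check_period(struct perf_event *event, u64 value)
{
	/* Give the PMU a chance to veto the new period before it is applied. */
	return event->pmu->check_period(event, value);
}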
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3587,6 +3587,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx,
 	intel_pmu_lbr_sched_task(ctx, sched_in);
 }
 
+static int intel_pmu_check_period(struct perf_event *event, u64 value)
+{
+	return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
+}
+
 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
 
 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
@@ -3667,6 +3672,8 @@ static __initconst const struct x86_pmu core_pmu = {
 	.cpu_starting		= intel_pmu_cpu_starting,
 	.cpu_dying		= intel_pmu_cpu_dying,
 	.cpu_dead		= intel_pmu_cpu_dead,
+
+	.check_period		= intel_pmu_check_period,
 };
 
 static struct attribute *intel_pmu_attrs[];
@@ -3711,6 +3718,8 @@ static __initconst const struct x86_pmu intel_pmu = {
 
 	.guest_get_msrs		= intel_guest_get_msrs,
 	.sched_task		= intel_pmu_sched_task,
+
+	.check_period		= intel_pmu_check_period,
 };
 
 static __init void intel_clovertown_quirk(void)
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -646,6 +646,11 @@ struct x86_pmu {
 	 * Intel host/guest support (KVM)
 	 */
 	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
+
+	/*
+	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
+	 */
+	int (*check_period) (struct perf_event *event, u64 period);
 };
 
 struct x86_perf_task_context {
@@ -857,7 +862,7 @@ static inline int amd_pmu_init(void)
 
 #ifdef CONFIG_CPU_SUP_INTEL
 
-static inline bool intel_pmu_has_bts(struct perf_event *event)
+static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned int hw_event, bts_event;
@@ -868,7 +873,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event)
 	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
 	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
 
-	return hw_event == bts_event && hwc->sample_period == 1;
+	return hw_event == bts_event && period == 1;
+}
+
+static inline bool intel_pmu_has_bts(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	return intel_pmu_has_bts_period(event, hwc->sample_period);
 }
 
 int intel_pmu_save_and_restart(struct perf_event *event);
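The excerpt contains only the BTS/check_period patch; the ring-buffer fix named in the pull message ("an over-eager condition that failed larger perf ring-buffer sizes") is not shown. For illustration only, a self-contained sketch of how comparing a byte-based allocation order against a page-based limit becomes over-eager; DEMO_PAGE_SHIFT, DEMO_MAX_ORDER, and the simplified order_base_2() are assumptions for the demo, not the kernel's code:

/*
 * Demo (not from the commit): an order counted in bytes and one
 * counted in pages differ by PAGE_SHIFT. Comparing a byte-based
 * order_base_2() result directly against a page-order limit such
 * as MAX_ORDER rejects sizes that are actually fine.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_SHIFT	12	/* 4 KiB pages (assumption) */
#define DEMO_MAX_ORDER	11	/* typical MAX_ORDER (assumption) */

/* ceil(log2(n)), mimicking the kernel's order_base_2() */
static unsigned int order_base_2(uint64_t n)
{
	unsigned int order = 0;

	while ((1ULL << order) < n)
		order++;
	return order;
}

int main(void)
{
	uint64_t size = 1ULL << 20;	/* a 1 MiB request, in bytes */

	/* Over-eager: byte order compared against a page-order limit. */
	printf("byte order %u >= MAX_ORDER %d -> %s\n",
	       order_base_2(size), DEMO_MAX_ORDER,
	       order_base_2(size) >= DEMO_MAX_ORDER ? "rejected" : "ok");

	/* Consistent: shift the limit into byte-order units first. */
	printf("byte order %u > PAGE_SHIFT+MAX_ORDER %d -> %s\n",
	       order_base_2(size), DEMO_PAGE_SHIFT + DEMO_MAX_ORDER,
	       order_base_2(size) > DEMO_PAGE_SHIFT + DEMO_MAX_ORDER ?
	       "rejected" : "ok");
	return 0;
}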