Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
 "Two hw-enablement patches, two race fixes, three fixes for regressions
  of semantics, plus a number of tooling fixes"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86/intel: Add proper condition to run sched_task callbacks
  perf/core: Fix locking for children siblings group read
  perf/core: Fix scheduling regression of pinned groups
  perf/x86/intel: Fix debug_store reset field for freq events
  perf/x86/intel: Add Goldmont Plus CPU PMU support
  perf/x86/intel: Enable C-state residency events for Apollo Lake
  perf symbols: Accept zero as the kernel base address
  Revert "perf/core: Drop kernel samples even though :u is specified"
  perf annotate: Fix broken arrow at row 0 connecting jmp instruction to its target
  perf evsel: State in the default event name if attr.exclude_kernel is set
  perf evsel: Fix attr.exclude_kernel setting for default cycles:p
@@ -1452,6 +1452,13 @@ static enum event_type_t get_event_type(struct perf_event *event)
 
 	lockdep_assert_held(&ctx->lock);
 
+	/*
+	 * It's 'group type', really, because if our group leader is
+	 * pinned, so are we.
+	 */
+	if (event->group_leader != event)
+		event = event->group_leader;
+
 	event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE;
 	if (!ctx->task)
 		event_type |= EVENT_CPU;
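(Not part of the diff: a minimal userspace sketch of the semantics this hunk restores. attr.pinned is meaningful only on a group leader, so a sibling must be classified by its leader's flag; the event choices and the absence of error handling below are illustrative assumptions, not taken from this commit.)

	/* Illustrative only: a pinned group whose sibling has
	 * attr.pinned == 0, yet schedules as pinned via its leader. */
	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <string.h>

	static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
				   int cpu, int group_fd, unsigned long flags)
	{
		return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
	}

	int main(void)
	{
		struct perf_event_attr attr;
		int leader_fd, sibling_fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_CPU_CYCLES;
		attr.pinned = 1;	/* pinned is a group-leader property */
		leader_fd = perf_event_open(&attr, 0, -1, -1, 0);

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		/* attr.pinned left 0: get_event_type() now classifies this
		 * sibling by the leader's pinned flag, not its own. */
		sibling_fd = perf_event_open(&attr, 0, -1, leader_fd, 0);

		return (leader_fd < 0 || sibling_fd < 0);
	}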
@@ -4378,7 +4385,9 @@ EXPORT_SYMBOL_GPL(perf_event_read_value);
 static int __perf_read_group_add(struct perf_event *leader,
 					u64 read_format, u64 *values)
 {
+	struct perf_event_context *ctx = leader->ctx;
 	struct perf_event *sub;
+	unsigned long flags;
 	int n = 1; /* skip @nr */
 	int ret;
 
@@ -4408,12 +4417,15 @@ static int __perf_read_group_add(struct perf_event *leader,
 	if (read_format & PERF_FORMAT_ID)
 		values[n++] = primary_event_id(leader);
 
+	raw_spin_lock_irqsave(&ctx->lock, flags);
+
 	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
 		values[n++] += perf_event_count(sub);
 		if (read_format & PERF_FORMAT_ID)
 			values[n++] = primary_event_id(sub);
 	}
 
+	raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	return 0;
 }
 
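(Not part of the diff: a hedged sketch of the read() path these two hunks protect. With attr.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID, one read() returns the whole group, filled in by __perf_read_group_add() walking leader->sibling_list; the new ctx->lock pair keeps a concurrently exiting child from mutating that list mid-walk. The struct name, the 16-entry cap, and dump_group() are hypothetical.)

	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	struct read_group {
		uint64_t nr;		/* number of events in the group */
		struct {
			uint64_t value;	/* summed count of one event */
			uint64_t id;	/* its primary event id */
		} values[16];		/* illustrative fixed cap */
	};

	/* assumes group_fd was opened with
	 * attr.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID */
	static void dump_group(int group_fd)
	{
		struct read_group rg;
		uint64_t i;

		if (read(group_fd, &rg, sizeof(rg)) < 0)
			return;
		/* The kernel fills this buffer while walking the sibling
		 * list; the fix holds ctx->lock across that walk. */
		for (i = 0; i < rg.nr; i++)
			printf("id %llu: %llu\n",
			       (unsigned long long)rg.values[i].id,
			       (unsigned long long)rg.values[i].value);
	}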
@@ -7321,21 +7333,6 @@ int perf_event_account_interrupt(struct perf_event *event)
 	return __perf_event_account_interrupt(event, 1);
 }
 
-static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
-{
-	/*
-	 * Due to interrupt latency (AKA "skid"), we may enter the
-	 * kernel before taking an overflow, even if the PMU is only
-	 * counting user events.
-	 * To avoid leaking information to userspace, we must always
-	 * reject kernel samples when exclude_kernel is set.
-	 */
-	if (event->attr.exclude_kernel && !user_mode(regs))
-		return false;
-
-	return true;
-}
-
 /*
  * Generic event overflow handling, sampling.
  */
@@ -7356,12 +7353,6 @@ static int __perf_event_overflow(struct perf_event *event,
 
 	ret = __perf_event_account_interrupt(event, throttle);
 
-	/*
-	 * For security, drop the skid kernel samples if necessary.
-	 */
-	if (!sample_is_allowed(event, regs))
-		return ret;
-
 	/*
 	 * XXX event_limit might not quite work as expected on inherited
 	 * events
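(Not part of the diff: the two hunks above revert the dropping of "skid" kernel samples for exclude_kernel events. A hedged sketch of the attribute setup involved; after the revert a :u event may again see occasional kernel-skid samples, and requesting a higher precise_ip is the usual way to reduce skid on hardware that supports it. The helper name and the constants chosen are illustrative.)

	#include <linux/perf_event.h>
	#include <string.h>

	static void setup_user_cycles(struct perf_event_attr *attr)
	{
		memset(attr, 0, sizeof(*attr));
		attr->size = sizeof(*attr);
		attr->type = PERF_TYPE_HARDWARE;
		attr->config = PERF_COUNT_HW_CPU_CYCLES;
		attr->exclude_kernel = 1;	/* the :u modifier */
		attr->precise_ip = 2;		/* ask the PMU for zero skid */
		attr->sample_period = 100000;	/* illustrative period */
	}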