Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 perf updates from Ingo Molnar:
 "This series tightens up RDPMC permissions: currently even highly
  sandboxed x86 execution environments (such as seccomp) have permission
  to execute RDPMC, which may leak various perf events / PMU state such
  as timing information and other CPU execution details.

  This 'all is allowed' RDPMC mode is still preserved as the
  (non-default) /sys/devices/cpu/rdpmc=2 setting.  The new default is
  that RDPMC access is only allowed if a perf event is mmap-ed (which is
  needed to correctly interpret RDPMC counter values in any case).

  As a side effect of these changes CR4 handling is cleaned up in the
  x86 code and a shadow copy of the CR4 value is added.

  The extra CR4 manipulation adds ~ <50ns to the context switch cost
  between rdpmc-capable and rdpmc-non-capable mms"

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86: Add /sys/devices/cpu/rdpmc=2 to allow rdpmc for all tasks
  perf/x86: Only allow rdpmc if a perf_event is mapped
  perf: Pass the event to arch_perf_update_userpage()
  perf: Add pmu callbacks to track event mapping and unmapping
  x86: Add a comment clarifying LDT context switching
  x86: Store a per-cpu shadow copy of CR4
  x86: Clean up cr4 manipulation
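For context, a minimal self-monitoring sketch of the usage pattern the new default assumes (this snippet is not part of the pull; error handling is pared down and the userpg->lock/offset handling a complete reader needs is omitted): the task mmap()s its perf event, reads the counter index from the mapped perf_event_mmap_page, and only then issues RDPMC.

/* Hypothetical self-monitoring sketch; not part of this pull request. */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

static uint64_t rdpmc(uint32_t counter)
{
	uint32_t lo, hi;

	asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	struct perf_event_attr attr;
	struct perf_event_mmap_page *pc;
	long page_size = sysconf(_SC_PAGESIZE);
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.exclude_kernel = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/* Mapping the event is what grants this mm RDPMC access. */
	pc = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
	if (pc == MAP_FAILED)
		return 1;

	if (pc->cap_user_rdpmc && pc->index)
		printf("cycles so far: %llu\n",
		       (unsigned long long)rdpmc(pc->index - 1));

	munmap(pc, page_size);
	close(fd);
	return 0;
}

Writing 2 to /sys/devices/cpu/rdpmc restores the old behaviour, where RDPMC works for every task whether or not such a mapping exists.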
@@ -4101,7 +4101,8 @@ unlock:
 	rcu_read_unlock();
 }
 
-void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
+void __weak arch_perf_update_userpage(
+	struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
 {
 }
 
@@ -4151,7 +4152,7 @@ void perf_event_update_userpage(struct perf_event *event)
 	userpg->time_running = running +
 			atomic64_read(&event->child_total_time_running);
 
-	arch_perf_update_userpage(userpg, now);
+	arch_perf_update_userpage(event, userpg, now);
 
 	barrier();
 	++userpg->lock;
@@ -4293,6 +4294,9 @@ static void perf_mmap_open(struct vm_area_struct *vma)
 
 	atomic_inc(&event->mmap_count);
 	atomic_inc(&event->rb->mmap_count);
+
+	if (event->pmu->event_mapped)
+		event->pmu->event_mapped(event);
 }
 
 /*
@@ -4312,6 +4316,9 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 	int mmap_locked = rb->mmap_locked;
 	unsigned long size = perf_data_size(rb);
 
+	if (event->pmu->event_unmapped)
+		event->pmu->event_unmapped(event);
+
 	atomic_dec(&rb->mmap_count);
 
 	if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
@@ -4513,6 +4520,9 @@ unlock:
 	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
 	vma->vm_ops = &perf_mmap_vmops;
 
+	if (event->pmu->event_mapped)
+		event->pmu->event_mapped(event);
+
 	return ret;
 }
 
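The two struct pmu hooks invoked above, ->event_mapped() and ->event_unmapped(), are what let an architecture grant and revoke user-space counter access per mm. A rough sketch of how a driver could use them follows; it is illustrative only, not the x86 code merged here: the per-mm counter (context.rdpmc_mapped) and the helpers arch_enable_user_rdpmc()/arch_disable_user_rdpmc() (which on x86 would toggle CR4.PCE via the new shadowed-CR4 helpers) are made-up names.

/* Illustrative sketch only: field and helper names below do not exist upstream. */
#include <linux/atomic.h>
#include <linux/perf_event.h>
#include <linux/sched.h>

static void example_pmu_event_mapped(struct perf_event *event)
{
	/* First mmap of a counter in this mm: allow RDPMC for its tasks. */
	if (atomic_inc_return(&current->mm->context.rdpmc_mapped) == 1)
		arch_enable_user_rdpmc(current->mm);
}

static void example_pmu_event_unmapped(struct perf_event *event)
{
	/* Last mapping torn down: revoke RDPMC again. */
	if (atomic_dec_and_test(&current->mm->context.rdpmc_mapped))
		arch_disable_user_rdpmc(current->mm);
}

static struct pmu example_pmu = {
	/* ...the usual event_init/add/del/start/stop/read callbacks... */
	.event_mapped	= example_pmu_event_mapped,
	.event_unmapped	= example_pmu_event_unmapped,
};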