Merge tag 'perf-urgent-2020-02-09' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Thomas Gleixner:
 "A set of fixes and improvements for the perf subsystem:

  Kernel fixes:

   - Install cgroup events to the correct CPU context to prevent a
     potential list double add

   - Prevent an integer underflow in the perf mlock accounting

   - Add a missing prototype for arch_perf_update_userpage()

  Tooling:

   - Add a missing unlock in the error path of maps__insert() in perf
     maps.

   - Fix the build with the latest libbfd

   - Fix the perf parser so it does not delete parse event terms, which
     caused a regression for using perf with the ARM CoreSight as the
     sink configuration was missing due to the deletion.

   - Fix the double free in the perf CPU map merging test case

   - Add the missing ustring support for the perf probe command"

* tag 'perf-urgent-2020-02-09' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf maps: Add missing unlock to maps__insert() error case
  perf probe: Add ustring support for perf probe command
  perf: Make perf able to build with latest libbfd
  perf test: Fix test case Merge cpu map
  perf parse: Copy string to perf_evsel_config_term
  perf parse: Refactor 'struct perf_evsel_config_term'
  kernel/events: Add a missing prototype for arch_perf_update_userpage()
  perf/cgroups: Install cgroup events to correct cpuctx
  perf/core: Fix mlock accounting in perf_mmap()
@@ -951,9 +951,9 @@ list_update_cgroup_event(struct perf_event *event,
 
         /*
          * Because cgroup events are always per-cpu events,
-         * this will always be called from the right CPU.
          * @ctx == &cpuctx->ctx.
          */
-        cpuctx = __get_cpu_context(ctx);
+        cpuctx = container_of(ctx, struct perf_cpu_context, ctx);
+
         /*
          * Since setting cpuctx->cgrp is conditional on the current @cgrp
@@ -979,7 +979,8 @@ list_update_cgroup_event(struct perf_event *event,
 
                 cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
                 if (add)
-                        list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
+                        list_add(cpuctx_entry,
+                                 per_cpu_ptr(&cgrp_cpuctx_list, event->cpu));
                 else
                         list_del(cpuctx_entry);
         }
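For context on the hunks above, here is a minimal user-space sketch of the container_of() idiom the fix switches to. The struct names are simplified stand-ins, not the kernel definitions: the point is only that when a context is embedded inside a per-CPU context as its ctx member, the owning structure can be recovered from the embedded pointer itself, independent of which CPU happens to be running, whereas the old code reached the per-CPU data of the current CPU (this_cpu_ptr) and could therefore touch the wrong CPU's list.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures (illustration only). */
struct event_context {
        int nr_cgroups;
};

struct cpu_context {
        int cpu;
        struct event_context ctx;  /* embedded, like ctx in struct perf_cpu_context */
};

/* container_of(): recover the enclosing structure from a member pointer. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
        struct cpu_context cpuctx = { .cpu = 3 };
        struct event_context *ctx = &cpuctx.ctx;

        /*
         * Given only @ctx, container_of() yields the cpu_context that
         * actually owns it -- no matter which CPU runs this code.
         */
        struct cpu_context *owner = container_of(ctx, struct cpu_context, ctx);

        printf("owner cpu = %d\n", owner->cpu);  /* prints 3 */
        return 0;
}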
@@ -5916,7 +5917,15 @@ accounting:
          */
         user_lock_limit *= num_online_cpus();
 
-        user_locked = atomic_long_read(&user->locked_vm) + user_extra;
+        user_locked = atomic_long_read(&user->locked_vm);
+
+        /*
+         * sysctl_perf_event_mlock may have changed, so that
+         *     user->locked_vm > user_lock_limit
+         */
+        if (user_locked > user_lock_limit)
+                user_locked = user_lock_limit;
+        user_locked += user_extra;
 
         if (user_locked > user_lock_limit) {
                 /*
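For the mlock hunk, a hedged stand-alone sketch of the arithmetic (the variable names follow the hunk above; the concrete numbers are invented for illustration): once sysctl_perf_event_mlock has been lowered, user->locked_vm can already exceed user_lock_limit, and the pre-fix formula lets the "extra" charge grow well beyond the pages this particular mmap adds. Clamping user_locked before adding user_extra bounds "extra" by user_extra, which is what prevents the accounting underflow mentioned in the changelog.

#include <stdio.h>

/*
 * Illustration only, not kernel code: compare the pre-fix and post-fix
 * computation of "extra" after the mlock limit has been reduced.
 */
int main(void)
{
        long user_lock_limit = 128;  /* assumed: limit after the sysctl was lowered */
        long locked_vm       = 512;  /* assumed: pages charged under the old, larger limit */
        long user_extra      = 16;   /* pages this new mmap wants to charge */

        /* Before the fix: extra can exceed user_extra. */
        long user_locked_old = locked_vm + user_extra;
        long extra_old = user_locked_old > user_lock_limit ?
                         user_locked_old - user_lock_limit : 0;

        /* After the fix: clamp first, so extra is at most user_extra. */
        long user_locked_new = locked_vm;
        if (user_locked_new > user_lock_limit)
                user_locked_new = user_lock_limit;
        user_locked_new += user_extra;
        long extra_new = user_locked_new > user_lock_limit ?
                         user_locked_new - user_lock_limit : 0;

        printf("extra before fix: %ld, after fix: %ld (user_extra = %ld)\n",
               extra_old, extra_new, user_extra);
        return 0;
}

Compiled and run, this prints "extra before fix: 400, after fix: 16 (user_extra = 16)", showing that only the clamped form keeps the charge within the pages actually being mapped.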