Merge tag 'perf-urgent-2020-04-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Thomas Gleixner:
 "Three fixes/updates for perf:

   - Fix the perf event cgroup tracking which tries to track the cgroup
     even for disabled events.

   - Add Ice Lake server support for uncore events

   - Disable pagefaults when retrieving the physical address in the
     sampling code"

* tag 'perf-urgent-2020-04-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/core: Disable page faults when getting phys address
  perf/x86/intel/uncore: Add Ice Lake server uncore support
  perf/cgroup: Correct indirection in perf_less_group_idx()
  perf/core: Fix event cgroup tracking
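A note on the perf_less_group_idx() change in the diff below: the perf min-heap holds an array of struct perf_event pointers, so a comparison callback receives pointers to the array slots (pointers to pointers) and has to dereference twice. The following is a minimal userspace sketch of that pattern, using qsort() and a hypothetical struct event / cmp_group_idx rather than the kernel's min-heap and struct perf_event:

/*
 * Illustrative userspace sketch, not kernel code: the comparator runs over an
 * array of pointers, so it must cast each argument to a pointer-to-pointer
 * and dereference before touching group_index.
 */
#include <stdio.h>
#include <stdlib.h>

struct event {
        unsigned long group_index;
};

/* qsort() passes addresses of the array elements; each element is itself
 * a struct event pointer, hence the double indirection. */
static int cmp_group_idx(const void *l, const void *r)
{
        const struct event *le = *(const struct event * const *)l;
        const struct event *re = *(const struct event * const *)r;

        return (le->group_index > re->group_index) -
               (le->group_index < re->group_index);
}

int main(void)
{
        struct event a = { .group_index = 3 }, b = { .group_index = 1 };
        struct event *heap[] = { &a, &b };

        qsort(heap, 2, sizeof(heap[0]), cmp_group_idx);
        printf("%lu %lu\n", heap[0]->group_index, heap[1]->group_index);
        return 0;
}

Compiled and run, this prints "1 3"; without the extra dereference (the bug being corrected), the callback would treat the array slot itself as the event and compare whatever bytes sit there instead of group_index.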
@@ -983,16 +983,10 @@ perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
         event->shadow_ctx_time = now - t->timestamp;
 }
 
-/*
- * Update cpuctx->cgrp so that it is set when first cgroup event is added and
- * cleared when last cgroup event is removed.
- */
 static inline void
-list_update_cgroup_event(struct perf_event *event,
-                         struct perf_event_context *ctx, bool add)
+perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
 {
         struct perf_cpu_context *cpuctx;
-        struct list_head *cpuctx_entry;
 
         if (!is_cgroup_event(event))
                 return;
@@ -1009,28 +1003,41 @@ list_update_cgroup_event(struct perf_event *event,
          * because if the first would mismatch, the second would not try again
          * and we would leave cpuctx->cgrp unset.
          */
-        if (add && !cpuctx->cgrp) {
+        if (ctx->is_active && !cpuctx->cgrp) {
                 struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
 
                 if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
                         cpuctx->cgrp = cgrp;
         }
 
-        if (add && ctx->nr_cgroups++)
-                return;
-        else if (!add && --ctx->nr_cgroups)
+        if (ctx->nr_cgroups++)
                 return;
 
-        /* no cgroup running */
-        if (!add)
+        list_add(&cpuctx->cgrp_cpuctx_entry,
+                        per_cpu_ptr(&cgrp_cpuctx_list, event->cpu));
+}
+
+static inline void
+perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
+{
+        struct perf_cpu_context *cpuctx;
+
+        if (!is_cgroup_event(event))
+                return;
+
+        /*
+         * Because cgroup events are always per-cpu events,
+         * @ctx == &cpuctx->ctx.
+         */
+        cpuctx = container_of(ctx, struct perf_cpu_context, ctx);
+
+        if (--ctx->nr_cgroups)
+                return;
+
+        if (ctx->is_active && cpuctx->cgrp)
                 cpuctx->cgrp = NULL;
 
-        cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
-        if (add)
-                list_add(cpuctx_entry,
-                         per_cpu_ptr(&cgrp_cpuctx_list, event->cpu));
-        else
-                list_del(cpuctx_entry);
+        list_del(&cpuctx->cgrp_cpuctx_entry);
 }
 
 #else /* !CONFIG_CGROUP_PERF */
@@ -1096,11 +1103,14 @@ static inline u64 perf_cgroup_event_time(struct perf_event *event)
 }
 
 static inline void
-list_update_cgroup_event(struct perf_event *event,
-                         struct perf_event_context *ctx, bool add)
+perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
+{
+}
+
+static inline void
+perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
 {
 }
-
 #endif
 
 /*
@@ -1791,13 +1801,14 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
                 add_event_to_groups(event, ctx);
         }
 
-        list_update_cgroup_event(event, ctx, true);
-
         list_add_rcu(&event->event_entry, &ctx->event_list);
         ctx->nr_events++;
         if (event->attr.inherit_stat)
                 ctx->nr_stat++;
 
+        if (event->state > PERF_EVENT_STATE_OFF)
+                perf_cgroup_event_enable(event, ctx);
+
         ctx->generation++;
 }
 
@@ -1976,8 +1987,6 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 
         event->attach_state &= ~PERF_ATTACH_CONTEXT;
 
-        list_update_cgroup_event(event, ctx, false);
-
         ctx->nr_events--;
         if (event->attr.inherit_stat)
                 ctx->nr_stat--;
@@ -1994,8 +2003,10 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
          * of error state is by explicit re-enabling
          * of the event
          */
-        if (event->state > PERF_EVENT_STATE_OFF)
+        if (event->state > PERF_EVENT_STATE_OFF) {
+                perf_cgroup_event_disable(event, ctx);
                 perf_event_set_state(event, PERF_EVENT_STATE_OFF);
+        }
 
         ctx->generation++;
 }
@@ -2226,6 +2237,7 @@ event_sched_out(struct perf_event *event,
 
         if (READ_ONCE(event->pending_disable) >= 0) {
                 WRITE_ONCE(event->pending_disable, -1);
+                perf_cgroup_event_disable(event, ctx);
                 state = PERF_EVENT_STATE_OFF;
         }
         perf_event_set_state(event, state);
@@ -2363,6 +2375,7 @@ static void __perf_event_disable(struct perf_event *event,
                 event_sched_out(event, cpuctx, ctx);
 
         perf_event_set_state(event, PERF_EVENT_STATE_OFF);
+        perf_cgroup_event_disable(event, ctx);
 }
 
 /*
@@ -2746,7 +2759,7 @@ static int __perf_install_in_context(void *info)
         }
 
 #ifdef CONFIG_CGROUP_PERF
-        if (is_cgroup_event(event)) {
+        if (event->state > PERF_EVENT_STATE_OFF && is_cgroup_event(event)) {
                 /*
                  * If the current cgroup doesn't match the event's
                  * cgroup, we should not try to schedule it.
@@ -2906,6 +2919,7 @@ static void __perf_event_enable(struct perf_event *event,
                 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
 
         perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
+        perf_cgroup_event_enable(event, ctx);
 
         if (!ctx->is_active)
                 return;
@@ -3508,7 +3522,8 @@ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
 
 static bool perf_less_group_idx(const void *l, const void *r)
 {
-        const struct perf_event *le = l, *re = r;
+        const struct perf_event *le = *(const struct perf_event **)l;
+        const struct perf_event *re = *(const struct perf_event **)r;
 
         return le->group_index < re->group_index;
 }
@@ -3616,8 +3631,10 @@ static int merge_sched_in(struct perf_event *event, void *data)
         }
 
         if (event->state == PERF_EVENT_STATE_INACTIVE) {
-                if (event->attr.pinned)
+                if (event->attr.pinned) {
+                        perf_cgroup_event_disable(event, ctx);
                         perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
+                }
 
                 *can_add_hw = 0;
                 ctx->rotate_necessary = 1;
@@ -6917,9 +6934,12 @@ static u64 perf_virt_to_phys(u64 virt)
                  * Try IRQ-safe __get_user_pages_fast first.
                  * If failed, leave phys_addr as 0.
                  */
-                if ((current->mm != NULL) &&
-                    (__get_user_pages_fast(virt, 1, 0, &p) == 1))
-                        phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
+                if (current->mm != NULL) {
+                        pagefault_disable();
+                        if (__get_user_pages_fast(virt, 1, 0, &p) == 1)
+                                phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
+                        pagefault_enable();
+                }
 
                 if (p)
                         put_page(p);