Merge commit 'v3.1-rc7' into perf/core
Merge reason: Pick up the latest upstream fixes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
@@ -400,14 +400,54 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
        local_irq_restore(flags);
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task)
+static inline void perf_cgroup_sched_out(struct task_struct *task,
+                                         struct task_struct *next)
 {
-        perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
+        struct perf_cgroup *cgrp1;
+        struct perf_cgroup *cgrp2 = NULL;
+
+        /*
+         * we come here when we know perf_cgroup_events > 0
+         */
+        cgrp1 = perf_cgroup_from_task(task);
+
+        /*
+         * next is NULL when called from perf_event_enable_on_exec()
+         * that will systematically cause a cgroup_switch()
+         */
+        if (next)
+                cgrp2 = perf_cgroup_from_task(next);
+
+        /*
+         * only schedule out current cgroup events if we know
+         * that we are switching to a different cgroup. Otherwise,
+         * do not touch the cgroup events.
+         */
+        if (cgrp1 != cgrp2)
+                perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
 }
 
-static inline void perf_cgroup_sched_in(struct task_struct *task)
+static inline void perf_cgroup_sched_in(struct task_struct *prev,
+                                        struct task_struct *task)
 {
-        perf_cgroup_switch(task, PERF_CGROUP_SWIN);
+        struct perf_cgroup *cgrp1;
+        struct perf_cgroup *cgrp2 = NULL;
+
+        /*
+         * we come here when we know perf_cgroup_events > 0
+         */
+        cgrp1 = perf_cgroup_from_task(task);
+
+        /* prev can never be NULL */
+        cgrp2 = perf_cgroup_from_task(prev);
+
+        /*
+         * only need to schedule in cgroup events if we are changing
+         * the cgroup during ctxsw. Cgroup events were not scheduled
+         * out on the previous ctxsw if that was not the case.
+         */
+        if (cgrp1 != cgrp2)
+                perf_cgroup_switch(task, PERF_CGROUP_SWIN);
 }
 
 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
@@ -519,11 +559,13 @@ static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
 {
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task)
+static inline void perf_cgroup_sched_out(struct task_struct *task,
+                                         struct task_struct *next)
 {
 }
 
-static inline void perf_cgroup_sched_in(struct task_struct *task)
+static inline void perf_cgroup_sched_in(struct task_struct *prev,
+                                        struct task_struct *task)
 {
 }
 
@@ -1989,7 +2031,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
         * cgroup events are system-wide mode only
         */
        if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
-               perf_cgroup_sched_out(task);
+               perf_cgroup_sched_out(task, next);
 }
 
 static void task_ctx_sched_out(struct perf_event_context *ctx)
@@ -2154,7 +2196,8 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
  * accessing the event control register. If an NMI hits, then it will
  * keep the event running.
  */
-void __perf_event_task_sched_in(struct task_struct *task)
+void __perf_event_task_sched_in(struct task_struct *prev,
+                                struct task_struct *task)
 {
        struct perf_event_context *ctx;
        int ctxn;
@@ -2172,7 +2215,7 @@ void __perf_event_task_sched_in(struct task_struct *task)
         * cgroup events are system-wide mode only
         */
        if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
-               perf_cgroup_sched_in(task);
+               perf_cgroup_sched_in(prev, task);
 }
 
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
@@ -2428,7 +2471,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
         * ctxswin cgroup events which are already scheduled
         * in.
         */
-       perf_cgroup_sched_out(current);
+       perf_cgroup_sched_out(current, NULL);
 
        raw_spin_lock(&ctx->lock);
        task_ctx_sched_out(ctx);
@@ -3354,8 +3397,8 @@ static int perf_event_index(struct perf_event *event)
 }
 
 static void calc_timer_values(struct perf_event *event,
-                               u64 *running,
-                               u64 *enabled)
+                               u64 *enabled,
+                               u64 *running)
 {
        u64 now, ctx_time;
 