Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (184 commits)
  perf probe: Clean up probe_point_lazy_walker() return value
  tracing: Fix irqoff selftest expanding max buffer
  tracing: Align 4 byte ints together in struct tracer
  tracing: Export trace_set_clr_event()
  tracing: Explain about unstable clock on resume with ring buffer warning
  ftrace/graph: Trace function entry before updating index
  ftrace: Add .ref.text as one of the safe areas to trace
  tracing: Adjust conditional expression latency formatting.
  tracing: Fix event alignment: skb:kfree_skb
  tracing: Fix event alignment: mce:mce_record
  tracing: Fix event alignment: kvm:kvm_hv_hypercall
  tracing: Fix event alignment: module:module_request
  tracing: Fix event alignment: ftrace:context_switch and ftrace:wakeup
  tracing: Remove lock_depth from event entry
  perf header: Stop using 'self'
  perf session: Use evlist/evsel for managing perf.data attributes
  perf top: Don't let events to eat up whole header line
  perf top: Fix events overflow in top command
  ring-buffer: Remove unused #include <linux/trace_irq.h>
  tracing: Add an 'overwrite' trace_option.
  ...
diff --git a/kernel/sched.c b/kernel/sched.c
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -606,9 +606,6 @@ static inline struct task_group *task_group(struct task_struct *p)
 	struct task_group *tg;
 	struct cgroup_subsys_state *css;
 
-	if (p->flags & PF_EXITING)
-		return &root_task_group;
-
 	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
 			lockdep_is_held(&task_rq(p)->lock));
 	tg = container_of(css, struct task_group, css);
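
The hunk above ends in the idiom this lookup rests on: container_of() recovers the enclosing task_group from a pointer to its embedded cgroup_subsys_state. A minimal, self-contained illustration of that pattern (user-space C with a hand-rolled macro and made-up struct names, not the kernel's definitions):

	#include <stdio.h>
	#include <stddef.h>

	/* Same idea as the kernel macro: subtract the member's offset
	 * from the member's address to recover the enclosing object. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct css { int id; };				/* stand-in for cgroup_subsys_state */
	struct tg { long weight; struct css css; };	/* stand-in for task_group */

	int main(void)
	{
		struct tg group = { .weight = 1024 };
		struct css *p = &group.css;	/* all we are handed is the member */
		struct tg *g = container_of(p, struct tg, css);

		printf("weight = %ld\n", g->weight);	/* prints 1024 */
		return 0;
	}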
@@ -2265,27 +2262,6 @@ void kick_process(struct task_struct *p)
 EXPORT_SYMBOL_GPL(kick_process);
 #endif /* CONFIG_SMP */
 
-/**
- * task_oncpu_function_call - call a function on the cpu on which a task runs
- * @p: the task to evaluate
- * @func: the function to be called
- * @info: the function call argument
- *
- * Calls the function @func when the task is currently running. This might
- * be on the current CPU, which just calls the function directly
- */
-void task_oncpu_function_call(struct task_struct *p,
-			      void (*func) (void *info), void *info)
-{
-	int cpu;
-
-	preempt_disable();
-	cpu = task_cpu(p);
-	if (task_curr(p))
-		smp_call_function_single(cpu, func, info, 1);
-	preempt_enable();
-}
-
 #ifdef CONFIG_SMP
 /*
  * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
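
The helper deleted above is the check-then-IPI pattern in its simplest form: pin your own CPU, look up where the task is running, and fire smp_call_function_single() only if the task is on a CPU right now. The merge drops it because the window between the task_curr() check and the IPI's arrival lets the task migrate or be preempted, so @func can end up running when the task is no longer current there. For reference, a hedged sketch of the pattern itself (poke_task() and bump_counter() are hypothetical names; the scheduler calls are the same ones visible in the deleted body):

	/* The cross-call payload runs in interrupt context on the
	 * remote CPU, so it must not sleep. */
	static void bump_counter(void *info)
	{
		atomic_inc(info);		/* info points at an atomic_t */
	}

	static void poke_task(struct task_struct *p, atomic_t *counter)
	{
		int cpu;

		preempt_disable();		/* keep our own CPU stable */
		cpu = task_cpu(p);		/* CPU @p last ran on */
		if (task_curr(p))		/* only if @p is on a CPU now */
			smp_call_function_single(cpu, bump_counter,
						 counter, 1);
		preempt_enable();
	}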
@@ -2776,9 +2752,12 @@ static inline void
 prepare_task_switch(struct rq *rq, struct task_struct *prev,
 		    struct task_struct *next)
 {
+	sched_info_switch(prev, next);
+	perf_event_task_sched_out(prev, next);
 	fire_sched_out_preempt_notifiers(prev, next);
 	prepare_lock_switch(rq, next);
 	prepare_arch_switch(next);
+	trace_sched_switch(prev, next);
 }
 
 /**
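
After this hunk, all switch-out bookkeeping (sched_info, perf, preempt notifiers, and the sched_switch tracepoint) fires from one place, under the rq lock. The preempt-notifier call in the middle is the extensible hook; a sketch of its consumer side under CONFIG_PREEMPT_NOTIFIERS, mirroring how KVM hooks vcpu load/put (the my_* names are hypothetical):

	#include <linux/preempt.h>
	#include <linux/sched.h>

	static void my_sched_in(struct preempt_notifier *pn, int cpu)
	{
		/* current is being scheduled back in on @cpu */
	}

	static void my_sched_out(struct preempt_notifier *pn,
				 struct task_struct *next)
	{
		/* current is being switched out; @next takes over */
	}

	static struct preempt_ops my_preempt_ops = {
		.sched_in	= my_sched_in,
		.sched_out	= my_sched_out,
	};

	static struct preempt_notifier my_notifier;

	/* Run from the task that wants the callbacks. */
	static void my_hook_current(void)
	{
		preempt_notifier_init(&my_notifier, &my_preempt_ops);
		preempt_notifier_register(&my_notifier);
	}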
@@ -2911,7 +2890,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	struct mm_struct *mm, *oldmm;
 
 	prepare_task_switch(rq, prev, next);
-	trace_sched_switch(prev, next);
+
 	mm = next->mm;
 	oldmm = prev->active_mm;
 	/*
@@ -3989,9 +3968,6 @@ need_resched_nonpreemptible:
 		rq->skip_clock_update = 0;
 
 	if (likely(prev != next)) {
-		sched_info_switch(prev, next);
-		perf_event_task_sched_out(prev, next);
-
 		rq->nr_switches++;
 		rq->curr = next;
 		++*switch_count;
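
Together, these hunks consolidate the switch-out path: schedule() no longer calls sched_info_switch()/perf_event_task_sched_out() itself, context_switch() no longer fires the tracepoint directly, and everything now runs from prepare_task_switch(). Consumers of the tracepoint are unaffected; they attach through the tracepoint registration helpers. A sketch, assuming this era's two-task sched_switch prototype (visible in the hunks above) and the post-2.6.35 data-cookie registration API (probe and function names are hypothetical):

	#include <linux/module.h>
	#include <trace/events/sched.h>

	/* Probes take the registered data cookie first, then the
	 * TP_PROTO arguments; they run with preemption disabled. */
	static void probe_switch(void *ignore, struct task_struct *prev,
				 struct task_struct *next)
	{
		/* e.g. count switches, sample prev->comm, ... */
	}

	static int __init switch_probe_init(void)
	{
		return register_trace_sched_switch(probe_switch, NULL);
	}

	static void __exit switch_probe_exit(void)
	{
		unregister_trace_sched_switch(probe_switch, NULL);
	}

	module_init(switch_probe_init);
	module_exit(switch_probe_exit);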
@@ -5572,7 +5548,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	 * The idle tasks have their own, simple scheduling class:
 	 */
 	idle->sched_class = &idle_sched_class;
-	ftrace_graph_init_task(idle);
+	ftrace_graph_init_idle_task(idle, cpu);
 }
 
 /*
@@ -8885,7 +8861,8 @@ cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 }
 
 static void
-cpu_cgroup_exit(struct cgroup_subsys *ss, struct task_struct *task)
+cpu_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
+		struct cgroup *old_cgrp, struct task_struct *task)
 {
 	/*
 	 * cgroup_exit() is called in the copy_process() failure path.
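
The extra cgrp/old_cgrp parameters bring the cpu controller in line with the updated cgroup_subsys::exit prototype, so the exit hook sees both the old and the new cgroup of the exiting task. The callback is wired up through the subsystem descriptor; an abbreviated sketch of that wiring for the cpu controller, from memory of this era's kernel/sched.c (the field list is indicative, not verbatim):

	struct cgroup_subsys cpu_cgroup_subsys = {
		.name		= "cpu",
		.create		= cpu_cgroup_create,
		.destroy	= cpu_cgroup_destroy,
		.can_attach	= cpu_cgroup_can_attach,
		.attach		= cpu_cgroup_attach,
		.exit		= cpu_cgroup_exit,	/* the widened hook above */
		.populate	= cpu_cgroup_populate,
		.subsys_id	= cpu_cgroup_subsys_id,
		.early_init	= 1,
	};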