Merge branch 'linus' into sched/core

Conflicts:
        fs/proc/array.c

Merge reason: resolve conflict and queue up dependent patch.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/sched.c | 110
@@ -39,7 +39,7 @@
 #include <linux/completion.h>
 #include <linux/kernel_stat.h>
 #include <linux/debug_locks.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/security.h>
 #include <linux/notifier.h>
 #include <linux/profile.h>
@@ -676,6 +676,7 @@ inline void update_rq_clock(struct rq *rq)
 
 /**
  * runqueue_is_locked
+ * @cpu: the processor in question.
  *
  * Returns true if the current cpu runqueue is locked.
  * This interface allows printk to be called with the runqueue lock
@@ -780,7 +781,7 @@ static int sched_feat_open(struct inode *inode, struct file *filp)
         return single_open(filp, sched_feat_show, NULL);
 }
 
-static struct file_operations sched_feat_fops = {
+static const struct file_operations sched_feat_fops = {
         .open           = sched_feat_open,
         .write          = sched_feat_write,
         .read           = seq_read,
@@ -2053,7 +2054,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
                 if (task_hot(p, old_rq->clock, NULL))
                         schedstat_inc(p, se.nr_forced2_migrations);
 #endif
-                perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS,
+                perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
                                      1, 1, NULL, 0);
         }
         p->se.vruntime -= old_cfsrq->min_vruntime -
@@ -2311,7 +2312,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 {
         int cpu, orig_cpu, this_cpu, success = 0;
         unsigned long flags;
-        struct rq *rq;
+        struct rq *rq, *orig_rq;
 
         if (!sched_feat(SYNC_WAKEUPS))
                 wake_flags &= ~WF_SYNC;
@@ -2319,7 +2320,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
         this_cpu = get_cpu();
 
         smp_wmb();
-        rq = task_rq_lock(p, &flags);
+        rq = orig_rq = task_rq_lock(p, &flags);
         update_rq_clock(rq);
         if (!(p->state & state))
                 goto out;
@@ -2350,6 +2351,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
                 set_task_cpu(p, cpu);
 
         rq = task_rq_lock(p, &flags);
+
+        if (rq != orig_rq)
+                update_rq_clock(rq);
+
         WARN_ON(p->state != TASK_WAKING);
         cpu = task_cpu(p);
 
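The two hunks above make try_to_wake_up() remember the runqueue it originally locked (orig_rq) and, after the task may have been migrated and the lock re-taken, refresh the clock only when it is now looking at a different runqueue. A minimal standalone sketch of that guard, using simplified stand-in types rather than the kernel's struct rq and its locking:

/* Sketch only: illustrates the "re-sync the clock only if the runqueue changed" guard. */
#include <stdio.h>

struct rq { int clock; };

static void update_rq_clock(struct rq *rq) { rq->clock++; }

/* The original runqueue's clock was already brought up to date earlier in the
 * wakeup path, so a second update is only needed when we ended up elsewhere. */
static void relock_and_sync(struct rq *rq, struct rq *orig_rq)
{
        if (rq != orig_rq)
                update_rq_clock(rq);
}

int main(void)
{
        struct rq a = { .clock = 10 }, b = { .clock = 20 };

        update_rq_clock(&a);            /* orig_rq clock refreshed once */
        relock_and_sync(&b, &a);        /* task migrated: new rq gets a fresh clock too */
        relock_and_sync(&a, &a);        /* same rq: no redundant update */
        printf("a.clock=%d b.clock=%d\n", a.clock, b.clock);
        return 0;
}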
@@ -2514,23 +2519,18 @@ void sched_fork(struct task_struct *p, int clone_flags)
 
         __sched_fork(p);
 
-        /*
-         * Make sure we do not leak PI boosting priority to the child.
-         */
-        p->prio = current->normal_prio;
-
         /*
          * Revert to default priority/policy on fork if requested.
          */
         if (unlikely(p->sched_reset_on_fork)) {
-                if (p->policy == SCHED_FIFO || p->policy == SCHED_RR)
+                if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
                         p->policy = SCHED_NORMAL;
-
-                if (p->normal_prio < DEFAULT_PRIO)
-                        p->prio = DEFAULT_PRIO;
+                        p->normal_prio = p->static_prio;
+                }
 
                 if (PRIO_TO_NICE(p->static_prio) < 0) {
                         p->static_prio = NICE_TO_PRIO(0);
+                        p->normal_prio = p->static_prio;
                         set_load_weight(p);
                 }
 
@@ -2541,6 +2541,11 @@ void sched_fork(struct task_struct *p, int clone_flags)
                 p->sched_reset_on_fork = 0;
         }
 
+        /*
+         * Make sure we do not leak PI boosting priority to the child.
+         */
+        p->prio = current->normal_prio;
+
         if (!rt_prio(p->prio))
                 p->sched_class = &fair_sched_class;
 
@@ -2581,8 +2586,6 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
         BUG_ON(p->state != TASK_RUNNING);
         update_rq_clock(rq);
 
-        p->prio = effective_prio(p);
-
         if (!p->sched_class->task_new || !current->se.on_rq) {
                 activate_task(rq, p, 0);
         } else {
@@ -2718,7 +2721,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
          */
         prev_state = prev->state;
         finish_arch_switch(prev);
-        perf_counter_task_sched_in(current, cpu_of(rq));
+        perf_event_task_sched_in(current, cpu_of(rq));
         finish_lock_switch(rq, prev);
 
         fire_sched_in_preempt_notifiers(current);
@@ -2904,6 +2907,19 @@ unsigned long nr_iowait(void)
         return sum;
 }
 
+unsigned long nr_iowait_cpu(void)
+{
+        struct rq *this = this_rq();
+        return atomic_read(&this->nr_iowait);
+}
+
+unsigned long this_cpu_load(void)
+{
+        struct rq *this = this_rq();
+        return this->cpu_load[0];
+}
+
+
 /* Variables and functions for calc_load */
 static atomic_long_t calc_load_tasks;
 static unsigned long calc_load_update;
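The two helpers added above only read local-runqueue state: nr_iowait_cpu() reports how many tasks on this CPU are sleeping in I/O wait, and this_cpu_load() returns the most recent cpu_load sample. A standalone sketch of the same accessors over a simplified stand-in structure (no this_rq() or atomics here, purely illustrative):

/* Sketch only: simplified stand-ins for the kernel's per-CPU runqueue fields. */
#include <stdio.h>

struct rq {
        int nr_iowait;              /* tasks from this CPU blocked in I/O wait */
        unsigned long cpu_load[5];  /* cpu_load[0] is the most recent load sample */
};

static struct rq this_runqueue = {
        .nr_iowait = 2,
        .cpu_load  = { 1024, 900, 800, 700, 600 },
};

/* Hypothetical equivalents of nr_iowait_cpu() / this_cpu_load(). */
static unsigned long nr_iowait_cpu(void) { return this_runqueue.nr_iowait; }
static unsigned long this_cpu_load(void) { return this_runqueue.cpu_load[0]; }

int main(void)
{
        printf("iowait tasks: %lu, load: %lu\n", nr_iowait_cpu(), this_cpu_load());
        return 0;
}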
@@ -3645,6 +3661,7 @@ static void update_group_power(struct sched_domain *sd, int cpu)
 
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
+ * @sd: The sched_domain whose statistics are to be updated.
  * @group: sched_group whose statistics are to be updated.
  * @this_cpu: Cpu for which load balance is currently performed.
  * @idle: Idle status of this_cpu
@@ -5079,17 +5096,16 @@ void account_idle_time(cputime_t cputime)
  */
 void account_process_tick(struct task_struct *p, int user_tick)
 {
-        cputime_t one_jiffy = jiffies_to_cputime(1);
-        cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
+        cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
         struct rq *rq = this_rq();
 
         if (user_tick)
-                account_user_time(p, one_jiffy, one_jiffy_scaled);
+                account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
         else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
-                account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
+                account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
                                     one_jiffy_scaled);
         else
-                account_idle_time(one_jiffy);
+                account_idle_time(cputime_one_jiffy);
 }
 
 /*
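The accounting change above stops recomputing jiffies_to_cputime(1) on every tick and uses the precomputed cputime_one_jiffy value instead, leaving only the scaled variant to be derived per call. A small userspace sketch of the same idea, hoisting an invariant conversion out of the per-tick path (all names and the tick length here are illustrative, not the kernel's):

/* Sketch only: cache an invariant conversion instead of redoing it every tick. */
#include <stdio.h>

typedef unsigned long long cputime_t;

#define TICK_NSEC 1000000ULL                          /* pretend one tick is 1 ms */
#define jiffies_to_cputime(j) ((cputime_t)(j) * TICK_NSEC)

/* Before: converted on every tick.  After: computed once, reused. */
static const cputime_t cputime_one_jiffy = jiffies_to_cputime(1);

static cputime_t user_time, idle_time;

static void account_tick(int user_tick)
{
        if (user_tick)
                user_time += cputime_one_jiffy;
        else
                idle_time += cputime_one_jiffy;
}

int main(void)
{
        for (int i = 0; i < 1000; i++)
                account_tick(i % 4 != 0);
        printf("user=%llu idle=%llu\n", user_time, idle_time);
        return 0;
}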
@@ -5193,7 +5209,7 @@ void scheduler_tick(void)
         curr->sched_class->task_tick(rq, curr, 0);
         spin_unlock(&rq->lock);
 
-        perf_counter_task_tick(curr, cpu);
+        perf_event_task_tick(curr, cpu);
 
 #ifdef CONFIG_SMP
         rq->idle_at_tick = idle_cpu(cpu);
@@ -5409,7 +5425,7 @@ need_resched_nonpreemptible:
 
         if (likely(prev != next)) {
                 sched_info_switch(prev, next);
-                perf_counter_task_sched_out(prev, next, cpu);
+                perf_event_task_sched_out(prev, next, cpu);
 
                 rq->nr_switches++;
                 rq->curr = next;
@@ -6708,9 +6724,6 @@ EXPORT_SYMBOL(yield);
 /*
  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
- *
- * But don't do that if it is a deliberate, throttling IO wait (this task
- * has set its backing_dev_info: the queue against which it should throttle)
  */
 void __sched io_schedule(void)
 {
@@ -7671,7 +7684,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 /*
  * Register at high priority so that task migration (migrate_all_tasks)
  * happens before everything else.  This has to be lower priority than
- * the notifier in the perf_counter subsystem, though.
+ * the notifier in the perf_event subsystem, though.
  */
 static struct notifier_block __cpuinitdata migration_notifier = {
         .notifier_call = migration_call,
@@ -9524,7 +9537,7 @@ void __init sched_init(void)
         alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
 #endif /* SMP */
 
-        perf_counter_init();
+        perf_event_init();
 
         scheduler_running = 1;
 }
@@ -10296,7 +10309,7 @@ static int sched_rt_global_constraints(void)
 #endif /* CONFIG_RT_GROUP_SCHED */
 
 int sched_rt_handler(struct ctl_table *table, int write,
-                struct file *filp, void __user *buffer, size_t *lenp,
+                void __user *buffer, size_t *lenp,
                 loff_t *ppos)
 {
         int ret;
@@ -10307,7 +10320,7 @@ int sched_rt_handler(struct ctl_table *table, int write,
         old_period = sysctl_sched_rt_period;
         old_runtime = sysctl_sched_rt_runtime;
 
-        ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+        ret = proc_dointvec(table, write, buffer, lenp, ppos);
 
         if (!ret && write) {
                 ret = sched_rt_global_constraints();
@@ -10361,8 +10374,7 @@ cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 }
 
 static int
-cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-                      struct task_struct *tsk)
+cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 {
 #ifdef CONFIG_RT_GROUP_SCHED
         if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
@@ -10372,15 +10384,45 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
         if (tsk->sched_class != &fair_sched_class)
                 return -EINVAL;
 #endif
         return 0;
 }
 
+static int
+cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+                      struct task_struct *tsk, bool threadgroup)
+{
+        int retval = cpu_cgroup_can_attach_task(cgrp, tsk);
+        if (retval)
+                return retval;
+        if (threadgroup) {
+                struct task_struct *c;
+                rcu_read_lock();
+                list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+                        retval = cpu_cgroup_can_attach_task(cgrp, c);
+                        if (retval) {
+                                rcu_read_unlock();
+                                return retval;
+                        }
+                }
+                rcu_read_unlock();
+        }
+        return 0;
+}
+
 static void
 cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-                  struct cgroup *old_cont, struct task_struct *tsk)
+                  struct cgroup *old_cont, struct task_struct *tsk,
+                  bool threadgroup)
 {
         sched_move_task(tsk);
+        if (threadgroup) {
+                struct task_struct *c;
+                rcu_read_lock();
+                list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+                        sched_move_task(c);
+                }
+                rcu_read_unlock();
+        }
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
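The new cpu_cgroup_can_attach()/cpu_cgroup_attach() pair above extends the per-task check and move to a whole thread group: every thread is validated first and the attach is vetoed on the first failure, and only then are all threads moved. A standalone sketch of that check-all-members-before-committing pattern over a plain linked list (hypothetical names, no RCU or cgroup types):

/* Sketch only: validate every member of a group before committing to the attach. */
#include <stdio.h>

struct task {
        int policy_ok;          /* stand-in for the per-task can-attach check */
        struct task *next;      /* stand-in for the thread_group list */
};

static int can_attach_task(struct task *t)
{
        return t->policy_ok ? 0 : -1;   /* the kernel version returns -EINVAL */
}

/* Check the leader and, if requested, every other thread before moving anyone. */
static int can_attach(struct task *leader, int threadgroup)
{
        int retval = can_attach_task(leader);
        if (retval)
                return retval;
        if (threadgroup) {
                for (struct task *c = leader->next; c; c = c->next) {
                        retval = can_attach_task(c);
                        if (retval)
                                return retval;
                }
        }
        return 0;
}

int main(void)
{
        struct task t2 = { .policy_ok = 0, .next = NULL };
        struct task t1 = { .policy_ok = 1, .next = &t2 };

        printf("attach allowed: %s\n", can_attach(&t1, 1) == 0 ? "yes" : "no");
        return 0;
}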