Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler changes from Ingo Molnar:
 "The main changes in this development cycle were:

   - full dynticks preparatory work by Frederic Weisbecker

   - factor out the cpu time accounting code better, by Li Zefan

   - multi-CPU load balancer cleanups and improvements by Joonsoo Kim

   - various smaller fixes and cleanups"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (45 commits)
  sched: Fix init NOHZ_IDLE flag
  sched: Prevent to re-select dst-cpu in load_balance()
  sched: Rename load_balance_tmpmask to load_balance_mask
  sched: Move up affinity check to mitigate useless redoing overhead
  sched: Don't consider other cpus in our group in case of NEWLY_IDLE
  sched: Explicitly cpu_idle_type checking in rebalance_domains()
  sched: Change position of resched_cpu() in load_balance()
  sched: Fix wrong rq's runnable_avg update with rt tasks
  sched: Document task_struct::personality field
  sched/cpuacct/UML: Fix header file dependency bug on the UML build
  cgroup: Kill subsys.active flag
  sched/cpuacct: No need to check subsys active state
  sched/cpuacct: Initialize cpuacct subsystem earlier
  sched/cpuacct: Initialize root cpuacct earlier
  sched/cpuacct: Allocate per_cpu cpuusage for root cpuacct statically
  sched/cpuacct: Clean up cpuacct.h
  sched/cpuacct: Remove redundant NULL checks in cpuacct_acount_field()
  sched/cpuacct: Remove redundant NULL checks in cpuacct_charge()
  sched/cpuacct: Add cpuacct_acount_field()
  sched/cpuacct: Add cpuacct_init()
  ...
@@ -127,18 +127,6 @@ extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#else
static inline void
proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
}
static inline void proc_sched_set_task(struct task_struct *p)
{
}
static inline void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
}
#endif

/*
@@ -570,7 +558,7 @@ struct signal_struct {
        cputime_t utime, stime, cutime, cstime;
        cputime_t gtime;
        cputime_t cgtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        struct cputime prev_cputime;
#endif
        unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
@@ -767,31 +755,6 @@ enum cpu_idle_type {
        CPU_MAX_IDLE_TYPES
};

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
 * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
 * increased costs.
 */
#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
# define SCHED_LOAD_RESOLUTION  10
# define scale_load(w)          ((w) << SCHED_LOAD_RESOLUTION)
# define scale_load_down(w)     ((w) >> SCHED_LOAD_RESOLUTION)
#else
# define SCHED_LOAD_RESOLUTION  0
# define scale_load(w)          (w)
# define scale_load_down(w)     (w)
#endif

#define SCHED_LOAD_SHIFT        (10 + SCHED_LOAD_RESOLUTION)
#define SCHED_LOAD_SCALE        (1L << SCHED_LOAD_SHIFT)

/*
 * Increase resolution of cpu_power calculations
 */
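The block above gives nice-level weights SCHED_LOAD_RESOLUTION extra bits of fixed-point precision. A minimal userspace sketch of the arithmetic, assuming the (currently disabled) BITS_PER_LONG > 32 branch were in effect; the main() harness and the 1024 sample weight are illustrative, not kernel code:

#include <stdio.h>

#define SCHED_LOAD_RESOLUTION   10
#define scale_load(w)           ((w) << SCHED_LOAD_RESOLUTION)
#define scale_load_down(w)      ((w) >> SCHED_LOAD_RESOLUTION)

int main(void)
{
        unsigned long w = 1024; /* a nice-0-style task weight */

        /* 1024 << 10 = 1048576: ten extra bits of resolution */
        printf("scale_load(%lu)      = %lu\n", w, scale_load(w));

        /* scale_load_down() recovers the original weight */
        printf("scale_load_down(...) = %lu\n",
               scale_load_down(scale_load(w)));
        return 0;
}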
@@ -817,62 +780,6 @@ enum cpu_idle_type {

extern int __weak arch_sd_sibiling_asym_packing(void);

struct sched_group_power {
        atomic_t ref;
        /*
         * CPU power of this group, SCHED_LOAD_SCALE being max power for a
         * single CPU.
         */
        unsigned int power, power_orig;
        unsigned long next_update;
        /*
         * Number of busy cpus in this group.
         */
        atomic_t nr_busy_cpus;

        unsigned long cpumask[0]; /* iteration mask */
};

struct sched_group {
        struct sched_group *next;       /* Must be a circular list */
        atomic_t ref;

        unsigned int group_weight;
        struct sched_group_power *sgp;

        /*
         * The CPUs this group covers.
         *
         * NOTE: this field is variable length. (Allocated dynamically
         * by attaching extra space to the end of the structure,
         * depending on how many CPUs the kernel has booted up with)
         */
        unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
        return to_cpumask(sg->cpumask);
}

/*
 * cpumask masking which cpus in the group are allowed to iterate up the domain
 * tree.
 */
static inline struct cpumask *sched_group_mask(struct sched_group *sg)
{
        return to_cpumask(sg->sgp->cpumask);
}

/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
        return cpumask_first(sched_group_cpus(group));
}

struct sched_domain_attr {
        int relax_domain_level;
};
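The NOTE in struct sched_group above describes the classic variable-length trailing-array pattern: the cpumask storage lives at the end of the struct and is sized at allocation time for the booted CPU count. A hedged userspace sketch of that pattern; struct group and group_alloc() are hypothetical names, not the kernel's allocation path:

#include <stdlib.h>

struct group {
        struct group *next;
        unsigned int weight;
        unsigned long cpumask[];        /* flexible array, like cpumask[0] above */
};

/* One allocation: the struct plus 'words' mask words attached at the end. */
static struct group *group_alloc(size_t words)
{
        return calloc(1, sizeof(struct group) + words * sizeof(unsigned long));
}

sched_group_cpus() then simply casts the trailing storage to a struct cpumask *, which is why the field has to stay last in the structure.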
@@ -883,6 +790,8 @@ struct sched_domain_attr {

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain {
        /* These fields must be setup */
        struct sched_domain *parent;    /* top domain must be null terminated */
@@ -899,6 +808,8 @@ struct sched_domain {
        unsigned int wake_idx;
        unsigned int forkexec_idx;
        unsigned int smt_gain;

        int nohz_idle;                  /* NOHZ IDLE status */
        int flags;                      /* See SD_* */
        int level;

@@ -971,18 +882,6 @@ extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);

/* Test a flag in parent sched domain */
static inline int test_sd_parent(struct sched_domain *sd, int flag)
{
        if (sd->parent && (sd->parent->flags & flag))
                return 1;

        return 0;
}

unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);

bool cpus_share_cache(int this_cpu, int that_cpu);

#else /* CONFIG_SMP */
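test_sd_parent() above only peeks one level up the domain tree. For contrast, a hedged sketch of walking the whole NULL-terminated parent chain under the same layout; struct domain and sd_has_flag() are hypothetical illustrations, not kernel APIs:

struct domain {
        struct domain *parent;  /* top domain is NULL-terminated */
        int flags;
};

/* Check every ancestor for the flag, not just the immediate parent. */
static int sd_has_flag(struct domain *sd, int flag)
{
        for (sd = sd->parent; sd; sd = sd->parent)
                if (sd->flags & flag)
                        return 1;
        return 0;
}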
@@ -1017,72 +916,6 @@ struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;

struct rq;
struct sched_domain;

/*
 * wake flags
 */
#define WF_SYNC         0x01            /* waker goes to sleep after wakup */
#define WF_FORK         0x02            /* child wakeup after fork */
#define WF_MIGRATED     0x04            /* internal use, task got migrated */

#define ENQUEUE_WAKEUP  1
#define ENQUEUE_HEAD    2
#ifdef CONFIG_SMP
#define ENQUEUE_WAKING  4       /* sched_class::task_waking was called */
#else
#define ENQUEUE_WAKING  0
#endif

#define DEQUEUE_SLEEP   1

struct sched_class {
        const struct sched_class *next;

        void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
        void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
        void (*yield_task) (struct rq *rq);
        bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);

        void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);

        struct task_struct * (*pick_next_task) (struct rq *rq);
        void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
        int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
        void (*migrate_task_rq)(struct task_struct *p, int next_cpu);

        void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
        void (*post_schedule) (struct rq *this_rq);
        void (*task_waking) (struct task_struct *task);
        void (*task_woken) (struct rq *this_rq, struct task_struct *task);

        void (*set_cpus_allowed)(struct task_struct *p,
                                 const struct cpumask *newmask);

        void (*rq_online)(struct rq *rq);
        void (*rq_offline)(struct rq *rq);
#endif

        void (*set_curr_task) (struct rq *rq);
        void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
        void (*task_fork) (struct task_struct *p);

        void (*switched_from) (struct rq *this_rq, struct task_struct *task);
        void (*switched_to) (struct rq *this_rq, struct task_struct *task);
        void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
                              int oldprio);

        unsigned int (*get_rr_interval) (struct rq *rq,
                                         struct task_struct *task);

#ifdef CONFIG_FAIR_GROUP_SCHED
        void (*task_move_group) (struct task_struct *p, int on_rq);
#endif
};

struct load_weight {
        unsigned long weight, inv_weight;
};
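struct sched_class above is a function-pointer ops table: each scheduling policy supplies one instance, chained by ->next in priority order, and the core dispatches through the hooks. A hedged sketch of that dispatch pattern with hypothetical names (struct policy, pick_next()); the real iteration lives in the scheduler core, not here:

#include <stddef.h>

struct task;                            /* stand-in for task_struct */

struct policy {
        const struct policy *next;      /* next-lower priority class */
        struct task *(*pick_next)(void);/* like pick_next_task() above */
};

/* Ask each class in priority order for a runnable task. */
static struct task *pick_next(const struct policy *highest)
{
        const struct policy *class;
        struct task *p;

        for (class = highest; class; class = class->next) {
                p = class->pick_next();
                if (p)
                        return p;
        }
        return NULL;                    /* nothing runnable: go idle */
}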
@@ -1274,8 +1107,10 @@ struct task_struct {
        int exit_code, exit_signal;
        int pdeath_signal;      /* The signal sent when the parent dies */
        unsigned int jobctl;    /* JOBCTL_*, siglock protected */
        /* ??? */

        /* Used for emulating ABI behavior of previous Linux versions */
        unsigned int personality;

        unsigned did_exec:1;
        unsigned in_execve:1;   /* Tell the LSMs that the process is doing an
                                 * execve */
@@ -1327,7 +1162,7 @@ struct task_struct {

        cputime_t utime, stime, utimescaled, stimescaled;
        cputime_t gtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        struct cputime prev_cputime;
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
@@ -2681,28 +2516,7 @@ extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifdef CONFIG_CGROUP_SCHED

extern struct task_group root_task_group;

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
                               struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);
extern void sched_move_task(struct task_struct *tsk);
#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
extern unsigned long sched_group_shares(struct task_group *tg);
#endif
#ifdef CONFIG_RT_GROUP_SCHED
extern int sched_group_set_rt_runtime(struct task_group *tg,
                                      long rt_runtime_us);
extern long sched_group_rt_runtime(struct task_group *tg);
extern int sched_group_set_rt_period(struct task_group *tg,
                                     long rt_period_us);
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
#endif
#endif /* CONFIG_CGROUP_SCHED */

extern int task_can_switch_user(struct user_struct *up,