Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (27 commits)
  sched: Use correct macro to display sched_child_runs_first in /proc/sched_debug
  sched: No need for bootmem special cases
  sched: Revert nohz_ratelimit() for now
  sched: Reduce update_group_power() calls
  sched: Update rq->clock for nohz balanced cpus
  sched: Fix spelling of sibling
  sched, cpuset: Drop __cpuexit from cpu hotplug callbacks
  sched: Fix the racy usage of thread_group_cputimer() in fastpath_timer_check()
  sched: run_posix_cpu_timers: Don't check ->exit_state, use lock_task_sighand()
  sched: thread_group_cputime: Simplify, document the "alive" check
  sched: Remove the obsolete exit_state/signal hacks
  sched: task_tick_rt: Remove the obsolete ->signal != NULL check
  sched: __sched_setscheduler: Read the RLIMIT_RTPRIO value lockless
  sched: Fix comments to make them DocBook happy
  sched: Fix fix_small_capacity
  powerpc: Exclude arch_sd_sibiling_asym_packing() on UP
  powerpc: Enable asymmetric SMT scheduling on POWER7
  sched: Add asymmetric group packing option for sibling domain
  sched: Fix capacity calculations for SMT4
  sched: Change nohz idle load balancing logic to push model
  ...
@@ -48,6 +48,31 @@ extern ssize_t arch_cpu_release(const char *, size_t);
 #endif
 struct notifier_block;
 
+/*
+ * CPU notifier priorities.
+ */
+enum {
+	/*
+	 * SCHED_ACTIVE marks a cpu which is coming up active during
+	 * CPU_ONLINE and CPU_DOWN_FAILED and must be the first
+	 * notifier. CPUSET_ACTIVE adjusts cpuset according to
+	 * cpu_active mask right after SCHED_ACTIVE. During
+	 * CPU_DOWN_PREPARE, SCHED_INACTIVE and CPUSET_INACTIVE are
+	 * ordered in the similar way.
+	 *
+	 * This ordering guarantees consistent cpu_active mask and
+	 * migration behavior to all cpu notifiers.
+	 */
+	CPU_PRI_SCHED_ACTIVE	= INT_MAX,
+	CPU_PRI_CPUSET_ACTIVE	= INT_MAX - 1,
+	CPU_PRI_SCHED_INACTIVE	= INT_MIN + 1,
+	CPU_PRI_CPUSET_INACTIVE	= INT_MIN,
+
+	/* migration should happen before other stuff but after perf */
+	CPU_PRI_PERF		= 20,
+	CPU_PRI_MIGRATION	= 10,
+};
+
 #ifdef CONFIG_SMP
 /* Need to know about CPUs going up/down? */
 #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
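The CPU_PRI_* values above are ordinary notifier priorities, so a subsystem on a kernel of this vintage can order its own hotplug callback relative to the scheduler and cpuset notifiers just by picking a priority between theirs and zero. A minimal sketch (not part of the patch), assuming the 2.6.36-era hotplug notifier API; the callback name and the priority value 100 are made up for the example:

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

/*
 * Hypothetical callback: runs after the scheduler/cpuset notifiers
 * (priorities INT_MAX and INT_MAX - 1) but before priority-0 notifiers.
 */
static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		pr_info("example: cpu %u came online\n", cpu);
		break;
	case CPU_DOWN_PREPARE:
		pr_info("example: cpu %u is about to go down\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb = {
	.notifier_call	= example_cpu_callback,
	.priority	= 100,	/* arbitrary: below CPU_PRI_CPUSET_ACTIVE */
};

static int __init example_cpu_nb_init(void)
{
	register_cpu_notifier(&example_cpu_nb);
	return 0;
}
core_initcall(example_cpu_nb_init);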
@@ -20,6 +20,7 @@ extern int number_of_cpusets; /* How many cpusets are defined in system? */
 
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
+extern void cpuset_update_active_cpus(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
 extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -132,6 +133,11 @@ static inline void set_mems_allowed(nodemask_t nodemask)
 static inline int cpuset_init(void) { return 0; }
 static inline void cpuset_init_smp(void) {}
 
+static inline void cpuset_update_active_cpus(void)
+{
+	partition_sched_domains(1, NULL, NULL);
+}
+
 static inline void cpuset_cpus_allowed(struct task_struct *p,
 				       struct cpumask *mask)
 {
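With CONFIG_CPUSETS enabled, cpuset_update_active_cpus() rebuilds the cpuset-derived scheduling domains; without it, the stub above simply collapses everything back to one domain via partition_sched_domains(1, NULL, NULL). The scheduler wires it up from the CPU_PRI_CPUSET_ACTIVE/INACTIVE notifiers introduced in this merge. A rough sketch of such a caller (not part of the patch, only an approximation of the kernel/sched.c code):

/* Approximate shape of the cpuset-side hotplug callback in kernel/sched.c. */
static int cpuset_cpu_active(struct notifier_block *nfb,
			     unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		cpuset_update_active_cpus();	/* rebuild sched domains */
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}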
@@ -1067,7 +1067,7 @@ static inline void perf_event_disable(struct perf_event *event) { }
 #define perf_cpu_notifier(fn)						\
 do {									\
 	static struct notifier_block fn##_nb __cpuinitdata =		\
-		{ .notifier_call = fn, .priority = 20 };		\
+		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
 	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
 		(void *)(unsigned long)smp_processor_id());		\
 	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
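perf_cpu_notifier() both registers the callback (now at CPU_PRI_PERF instead of a magic 20) and immediately replays the CPU_UP_PREPARE/CPU_STARTING/CPU_ONLINE sequence for the CPU it runs on. A minimal sketch of a caller (not part of the patch); the function name and the per-cpu bookkeeping are illustrative only:

static int __cpuinit example_perf_cpu_notify(struct notifier_block *self,
					     unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	pr_debug("example: perf hotplug action %lu on cpu %u\n", action, cpu);

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		/* allocate hypothetical per-cpu state for this CPU */
		break;
	case CPU_DOWN_PREPARE:
		/* tear that state down again */
		break;
	}
	return NOTIFY_OK;
}

static void __init example_perf_init(void)
{
	/* registers at CPU_PRI_PERF and replays bring-up for this CPU */
	perf_cpu_notifier(example_perf_cpu_notify);
}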
@@ -272,19 +272,10 @@ extern int runqueue_is_locked(int cpu);
 
 extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
-extern int select_nohz_load_balancer(int cpu);
-extern int get_nohz_load_balancer(void);
-extern int nohz_ratelimit(int cpu);
+extern void select_nohz_load_balancer(int stop_tick);
+extern int get_nohz_timer_target(void);
 #else
-static inline int select_nohz_load_balancer(int cpu)
-{
-	return 0;
-}
-
-static inline int nohz_ratelimit(int cpu)
-{
-	return 0;
-}
+static inline void select_nohz_load_balancer(int stop_tick) { }
 #endif
 
 /*
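This is the interface side of the nohz push-model change: the CPU stopping or restarting its tick now tells the balancer directly, and get_nohz_timer_target() replaces get_nohz_load_balancer() as the hint for where to place timers while a CPU is idle. A simplified sketch of where the two calls sit (not part of the patch, and not verbatim kernel/time/tick-sched.c code):

/* Illustrative only: the real callers live in kernel/time/tick-sched.c. */
static void example_stop_tick(void)
{
	/* this CPU goes tickless-idle: hand idle load balancing off */
	select_nohz_load_balancer(1);
}

static void example_restart_tick(void)
{
	/* the tick is back: drop out of the nohz idle-balancing set */
	select_nohz_load_balancer(0);
}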
@@ -801,7 +792,7 @@ enum cpu_idle_type {
 #define SD_POWERSAVINGS_BALANCE	0x0100	/* Balance for power savings */
 #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
-
+#define SD_ASYM_PACKING		0x0800	/* Place busy groups earlier in the domain */
 #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
 
 enum powersavings_balance_level {
@@ -836,6 +827,8 @@ static inline int sd_balance_for_package_power(void)
 	return SD_PREFER_SIBLING;
 }
 
+extern int __weak arch_sd_sibiling_asym_packing(void);
+
 /*
  * Optimise SD flags for power savings:
  * SD_BALANCE_NEWIDLE helps agressive task consolidation and power savings.
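arch_sd_sibiling_asym_packing() (the misspelling is the kernel's own) is a __weak hook: the generic scheduler supplies a default that returns 0, and an architecture overrides it to return SD_ASYM_PACKING, turning on busy-group packing for the SMT sibling domain. A sketch of such an override (not part of the patch), modelled on the POWER7 change in this series but paraphrased rather than quoted:

/*
 * Architecture override, roughly what the powerpc patch does: report
 * SD_ASYM_PACKING only when the hardware has asymmetric SMT threads,
 * so busy tasks get packed onto the lower-numbered threads.
 */
int arch_sd_sibiling_asym_packing(void)
{
	if (cpu_has_feature(CPU_FTR_ASYM_SMT))
		return SD_ASYM_PACKING;
	return 0;
}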
@@ -857,7 +850,7 @@ struct sched_group {
 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
 	 * single CPU.
 	 */
-	unsigned int cpu_power;
+	unsigned int cpu_power, cpu_power_orig;
 
 	/*
 	 * The CPUs this group covers.
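cpu_power_orig records the group's capacity before it is scaled down for RT and interrupt time; the SMT4 capacity fix in this series compares the two to decide whether a sibling group can still be counted as able to run one task. A rough paraphrase of that idea (not part of the patch, and not a verbatim copy of fix_small_capacity()):

/*
 * Illustrative only: count an SMT sibling group as having capacity for
 * one task while its scaled power is still close to the unscaled value
 * (roughly 90% here), i.e. RT/irq pressure hasn't eaten the thread.
 */
static int example_group_has_capacity(struct sched_group *group)
{
	return group->cpu_power * 32 > group->cpu_power_orig * 29;
}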
@@ -1693,6 +1686,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_EXITING	0x00000004	/* getting shut down */
 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
+#define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
 #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
 #define PF_MCE_PROCESS	0x00000080	/* process policy on mce errors */
 #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
@@ -1787,20 +1781,23 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 #endif
 
 /*
- * Architectures can set this to 1 if they have specified
- * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
- * but then during bootup it turns out that sched_clock()
- * is reliable after all:
+ * Do not use outside of architecture code which knows its limitations.
+ *
+ * sched_clock() has no promise of monotonicity or bounded drift between
+ * CPUs, use (which you should not) requires disabling IRQs.
+ *
+ * Please use one of the three interfaces below.
  */
-#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-extern int sched_clock_stable;
-#endif
-
-/* ftrace calls sched_clock() directly */
 extern unsigned long long notrace sched_clock(void);
+/*
+ * See the comment in kernel/sched_clock.c
+ */
+extern u64 cpu_clock(int cpu);
+extern u64 local_clock(void);
+extern u64 sched_clock_cpu(int cpu);
+
 
 extern void sched_clock_init(void);
 extern u64 sched_clock_cpu(int cpu);
 
 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 static inline void sched_clock_tick(void)
@@ -1815,17 +1812,19 @@ static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
 }
 #else
+/*
+ * Architectures can set this to 1 if they have specified
+ * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
+ * but then during bootup it turns out that sched_clock()
+ * is reliable after all:
+ */
+extern int sched_clock_stable;
+
 extern void sched_clock_tick(void);
 extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
 #endif
 
-/*
- * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
- * clock constructed from sched_clock():
- */
-extern unsigned long long cpu_clock(int cpu);
-
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
 extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
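After this reorganisation the sanctioned timestamp interfaces are cpu_clock(cpu), local_clock() and sched_clock_cpu(cpu); raw sched_clock() is left to architecture code and ftrace. A small usage sketch (not part of the patch); the function name is made up, and local_clock() is the per-CPU nanosecond clock declared in the hunks above:

#include <linux/sched.h>

/* Illustrative: time a section of code on the local CPU. */
static u64 example_time_section(void (*fn)(void))
{
	u64 t0 = local_clock();

	fn();

	/* nanoseconds; may drift slightly if the task migrated CPUs */
	return local_clock() - t0;
}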
@@ -103,6 +103,7 @@ int arch_update_cpu_topology(void);
 				| 1*SD_SHARE_PKG_RESOURCES		\
 				| 0*SD_SERIALIZE			\
 				| 0*SD_PREFER_SIBLING			\
+				| arch_sd_sibiling_asym_packing()	\
 				,					\
 	.last_balance		= jiffies,				\
 	.balance_interval	= 1,					\