Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (27 commits)
  sched: Use correct macro to display sched_child_runs_first in /proc/sched_debug
  sched: No need for bootmem special cases
  sched: Revert nohz_ratelimit() for now
  sched: Reduce update_group_power() calls
  sched: Update rq->clock for nohz balanced cpus
  sched: Fix spelling of sibling
  sched, cpuset: Drop __cpuexit from cpu hotplug callbacks
  sched: Fix the racy usage of thread_group_cputimer() in fastpath_timer_check()
  sched: run_posix_cpu_timers: Don't check ->exit_state, use lock_task_sighand()
  sched: thread_group_cputime: Simplify, document the "alive" check
  sched: Remove the obsolete exit_state/signal hacks
  sched: task_tick_rt: Remove the obsolete ->signal != NULL check
  sched: __sched_setscheduler: Read the RLIMIT_RTPRIO value lockless
  sched: Fix comments to make them DocBook happy
  sched: Fix fix_small_capacity
  powerpc: Exclude arch_sd_sibiling_asym_packing() on UP
  powerpc: Enable asymmetric SMT scheduling on POWER7
  sched: Add asymmetric group packing option for sibling domain
  sched: Fix capacity calculations for SMT4
  sched: Change nohz idle load balancing logic to push model
  ...
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -272,19 +272,10 @@ extern int runqueue_is_locked(int cpu);
 extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
-extern int select_nohz_load_balancer(int cpu);
-extern int get_nohz_load_balancer(void);
-extern int nohz_ratelimit(int cpu);
+extern void select_nohz_load_balancer(int stop_tick);
+extern int get_nohz_timer_target(void);
 #else
-static inline int select_nohz_load_balancer(int cpu)
-{
-	return 0;
-}
-
-static inline int nohz_ratelimit(int cpu)
-{
-	return 0;
-}
+static inline void select_nohz_load_balancer(int stop_tick) { }
 #endif
 
 /*
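The interface flips from a pull model (each CPU asking whether it should balance) to a push model: a CPU entering nohz idle nominates an idle load balancer by passing stop_tick=1 and withdraws with stop_tick=0 when its tick restarts. A minimal sketch of the new calling convention, with hypothetical wrapper names (the real call sites live in the tick code, not in this diff):

/* Sketch only -- hypothetical wrappers illustrating the new calling
 * convention; not part of this commit. */
static void enter_nohz_idle(void)
{
	select_nohz_load_balancer(1);	/* this CPU is stopping its tick */
}

static void exit_nohz_idle(void)
{
	select_nohz_load_balancer(0);	/* tick is running again */
}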
@@ -801,7 +792,7 @@ enum cpu_idle_type {
 #define SD_POWERSAVINGS_BALANCE	0x0100	/* Balance for power savings */
 #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
-
+#define SD_ASYM_PACKING		0x0800	/* Place busy groups earlier in the domain */
 #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
 
 enum powersavings_balance_level {
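SD_ASYM_PACKING slots into the free 0x0800 bit. When set on a domain, the balancer packs running tasks toward lower-numbered CPUs, which matters on POWER7 where the low-numbered SMT threads are faster. A hedged sketch of the decision the flag enables (hypothetical helper; the real check lives in the fair-scheduler balancing code):

/* Sketch, not from this commit: with SD_ASYM_PACKING set, a busy group
 * whose CPUs are numbered above the destination CPU is worth pulling
 * from, so load drains toward the low-numbered siblings. */
static int want_asym_pull(struct sched_domain *sd, int dst_cpu, int busiest_cpu)
{
	if (!(sd->flags & SD_ASYM_PACKING))
		return 0;
	return dst_cpu < busiest_cpu;
}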
@@ -836,6 +827,8 @@ static inline int sd_balance_for_package_power(void)
 	return SD_PREFER_SIBLING;
 }
 
+extern int __weak arch_sd_sibiling_asym_packing(void);
+
 /*
  * Optimise SD flags for power savings:
  * SD_BALANCE_NEWIDLE helps agressive task consolidation and power savings.
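arch_sd_sibiling_asym_packing() (the misspelling of "sibling" is the kernel's own) uses the weak-symbol pattern: the scheduler core supplies a default that opts out, and an architecture links in a strong override to opt in; per the shortlog, powerpc enables it on POWER7. A sketch of the two halves, with the exact bodies being an assumption:

/* Generic default (e.g. in kernel/sched.c): weak, so an architecture
 * may replace it at link time. Opts out of asymmetric packing. */
int __weak arch_sd_sibiling_asym_packing(void)
{
	return 0;
}

/* Architecture override (in a separate object file, e.g. under
 * arch/powerpc): the strong definition wins over the weak one. */
int arch_sd_sibiling_asym_packing(void)
{
	return SD_ASYM_PACKING;	/* pack toward low-numbered SMT threads */
}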
@@ -857,7 +850,7 @@ struct sched_group {
 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
 	 * single CPU.
 	 */
-	unsigned int cpu_power;
+	unsigned int cpu_power, cpu_power_orig;
 
 	/*
 	 * The CPUs this group covers.
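cpu_power decays as real-time and interrupt work eat into a group's capacity; cpu_power_orig preserves the unscaled value so code can tell a genuinely small SMT group (the SMT4 capacity fix in the shortlog) from one that merely lost power temporarily. A hedged sketch of the kind of comparison this enables (hypothetical helper and threshold):

/* Sketch, not from this commit: if most of the original power is still
 * there (roughly 90%, via integer math), treat the group as intact. */
static int group_power_mostly_intact(struct sched_group *group)
{
	return group->cpu_power * 32 > group->cpu_power_orig * 29;
}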
@@ -1693,6 +1686,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_EXITING	0x00000004	/* getting shut down */
 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
+#define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
 #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
 #define PF_MCE_PROCESS	0x00000080	/* process policy on mce errors */
 #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
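PF_WQ_WORKER fills the previously unused 0x00000020 slot between PF_VCPU and PF_FORKNOEXEC. PF_* values are plain bits in task_struct::flags, tested with bitwise AND; a trivial sketch (hypothetical helper):

/* Sketch only: how any PF_* bit is queried. */
static inline int task_is_wq_worker(struct task_struct *p)
{
	return (p->flags & PF_WQ_WORKER) != 0;
}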
@@ -1787,20 +1781,23 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 #endif
 
 /*
- * Architectures can set this to 1 if they have specified
- * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
- * but then during bootup it turns out that sched_clock()
- * is reliable after all:
+ * Do not use outside of architecture code which knows its limitations.
+ *
+ * sched_clock() has no promise of monotonicity or bounded drift between
+ * CPUs, use (which you should not) requires disabling IRQs.
+ *
+ * Please use one of the three interfaces below.
  */
-#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-extern int sched_clock_stable;
-#endif
-
-/* ftrace calls sched_clock() directly */
 extern unsigned long long notrace sched_clock(void);
+/*
+ * See the comment in kernel/sched_clock.c
+ */
+extern u64 cpu_clock(int cpu);
+extern u64 local_clock(void);
+extern u64 sched_clock_cpu(int cpu);
+
 
 extern void sched_clock_init(void);
-extern u64 sched_clock_cpu(int cpu);
 
 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 static inline void sched_clock_tick(void)
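The comment's "three interfaces below" are cpu_clock(), local_clock() and sched_clock_cpu(), which sanitize raw sched_clock() for generic code. A hedged sketch of typical use (hypothetical helper; values are nanoseconds):

/* Sketch, not from this commit: time a callback with local_clock(),
 * which is safe for generic code where raw sched_clock() is not. */
static u64 time_callback_ns(void (*fn)(void))
{
	u64 t0 = local_clock();

	fn();
	return local_clock() - t0;
}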
@@ -1815,17 +1812,19 @@ static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
 }
 #else
+/*
+ * Architectures can set this to 1 if they have specified
+ * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
+ * but then during bootup it turns out that sched_clock()
+ * is reliable after all:
+ */
+extern int sched_clock_stable;
+
 extern void sched_clock_tick(void);
 extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
 #endif
 
-/*
- * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
- * clock constructed from sched_clock():
- */
-extern unsigned long long cpu_clock(int cpu);
-
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
 extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
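sched_clock_stable now lives behind the CONFIG_HAVE_UNSTABLE_SCHED_CLOCK branch, so only architectures whose clock may be unstable see it. A hedged sketch of the consumer pattern (hypothetical helper):

/* Sketch only: branch on clock stability where the unstable-clock
 * infrastructure may or may not be compiled in. */
static inline int cross_cpu_clock_comparable(void)
{
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	return sched_clock_stable;
#else
	return 1;	/* arch guarantees a stable sched_clock() */
#endif
}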