Merge branch 'core/percpu' into stackprotector
Conflicts:
	arch/x86/include/asm/pda.h
	arch/x86/include/asm/system.h

Also, moved include/asm-x86/stackprotector.h to arch/x86/include/asm.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
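Note: the sched.h hunks below come in through the core/percpu side and are part of the cpumask rework: fixed-size cpumask_t objects are replaced with struct cpumask * pointers, cpumask_var_t variables and accessors such as cpumask_test_cpu(). As a rough illustration only (not code from this commit; example_pin_to_cpu0() is a made-up helper), the allocation/test pattern the converted declarations expect looks like this:

#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/gfp.h>

/* Hypothetical sketch of the cpumask_var_t usage pattern. */
static int example_pin_to_cpu0(struct task_struct *p)
{
	cpumask_var_t mask;
	int ret;

	/* Allocates off-stack storage when CONFIG_CPUMASK_OFFSTACK=y. */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(mask);
	cpumask_set_cpu(0, mask);

	/* set_cpus_allowed_ptr() now takes const struct cpumask *. */
	ret = set_cpus_allowed_ptr(p, mask);

	free_cpumask_var(mask);
	return ret;
}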
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -250,7 +250,7 @@ extern void init_idle_bootup_task(struct task_struct *idle);
 extern int runqueue_is_locked(void);
 extern void task_rq_unlock_wait(struct task_struct *p);
 
-extern cpumask_t nohz_cpu_mask;
+extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
 #else
@@ -284,7 +284,6 @@ long io_schedule_timeout(long timeout);
 
 extern void cpu_init (void);
 extern void trap_init(void);
-extern void account_process_tick(struct task_struct *task, int user);
 extern void update_process_times(int user);
 extern void scheduler_tick(void);
 
@@ -387,6 +386,9 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 		(mm)->hiwater_vm = (mm)->total_vm;	\
 } while (0)
 
+#define get_mm_hiwater_rss(mm)	max((mm)->hiwater_rss, get_mm_rss(mm))
+#define get_mm_hiwater_vm(mm)	max((mm)->hiwater_vm, (mm)->total_vm)
+
 extern void set_dumpable(struct mm_struct *mm, int value);
 extern int get_dumpable(struct mm_struct *mm);
 
@@ -758,20 +760,51 @@ enum cpu_idle_type {
 #define SD_SERIALIZE		1024	/* Only a single load balancing instance */
 #define SD_WAKE_IDLE_FAR	2048	/* Gain latency sacrificing cache hit */
 
-#define BALANCE_FOR_MC_POWER	\
-	(sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
+enum powersavings_balance_level {
+	POWERSAVINGS_BALANCE_NONE = 0,  /* No power saving load balance */
+	POWERSAVINGS_BALANCE_BASIC,	/* Fill one thread/core/package
+					 * first for long running threads
+					 */
+	POWERSAVINGS_BALANCE_WAKEUP,	/* Also bias task wakeups to semi-idle
+					 * cpu package for power savings
+					 */
+	MAX_POWERSAVINGS_BALANCE_LEVELS
+};
 
-#define BALANCE_FOR_PKG_POWER	\
-	((sched_mc_power_savings || sched_smt_power_savings) ?	\
-	 SD_POWERSAVINGS_BALANCE : 0)
+extern int sched_mc_power_savings, sched_smt_power_savings;
 
-#define test_sd_parent(sd, flag)	((sd->parent &&		\
-					 (sd->parent->flags & flag)) ? 1 : 0)
+static inline int sd_balance_for_mc_power(void)
+{
+	if (sched_smt_power_savings)
+		return SD_POWERSAVINGS_BALANCE;
 
+	return 0;
+}
+
+static inline int sd_balance_for_package_power(void)
+{
+	if (sched_mc_power_savings | sched_smt_power_savings)
+		return SD_POWERSAVINGS_BALANCE;
+
+	return 0;
+}
+
+/*
+ * Optimise SD flags for power savings:
+ * SD_BALANCE_NEWIDLE helps agressive task consolidation and power savings.
+ * Keep default SD flags if sched_{smt,mc}_power_saving=0
+ */
+
+static inline int sd_power_saving_flags(void)
+{
+	if (sched_mc_power_savings | sched_smt_power_savings)
+		return SD_BALANCE_NEWIDLE;
+
+	return 0;
+}
 
 struct sched_group {
 	struct sched_group *next;	/* Must be a circular list */
-	cpumask_t cpumask;
 
 	/*
 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
@@ -784,8 +817,15 @@ struct sched_group {
 	 * (see include/linux/reciprocal_div.h)
 	 */
 	u32 reciprocal_cpu_power;
+
+	unsigned long cpumask[];
 };
 
+static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+{
+	return to_cpumask(sg->cpumask);
+}
+
 enum sched_domain_level {
 	SD_LV_NONE = 0,
 	SD_LV_SIBLING,
@@ -809,7 +849,6 @@ struct sched_domain {
 	struct sched_domain *parent;	/* top domain must be null terminated */
 	struct sched_domain *child;	/* bottom domain must be null terminated */
 	struct sched_group *groups;	/* the balancing groups of the domain */
-	cpumask_t span;			/* span of all CPUs in this domain */
 	unsigned long min_interval;	/* Minimum balance interval ms */
 	unsigned long max_interval;	/* Maximum balance interval ms */
 	unsigned int busy_factor;	/* less balancing by factor if busy */
@@ -864,18 +903,34 @@ struct sched_domain {
 #ifdef CONFIG_SCHED_DEBUG
 	char *name;
 #endif
+
+	/* span of all CPUs in this domain */
+	unsigned long span[];
 };
 
-extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
+{
+	return to_cpumask(sd->span);
+}
+
+extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
 				    struct sched_domain_attr *dattr_new);
 extern int arch_reinit_sched_domains(void);
 
+/* Test a flag in parent sched domain */
+static inline int test_sd_parent(struct sched_domain *sd, int flag)
+{
+	if (sd->parent && (sd->parent->flags & flag))
+		return 1;
+
+	return 0;
+}
+
 #else /* CONFIG_SMP */
 
 struct sched_domain_attr;
 
 static inline void
-partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
 			struct sched_domain_attr *dattr_new)
 {
 }
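Note on the accessors added above: sched_group_cpus() and sched_domain_span() hide the trailing variable-length bitmap storage behind a struct cpumask *, so callers use the generic cpumask API instead of poking a cpumask_t field. A hypothetical caller-side sketch (example_count_domain_cpus() is not part of this commit):

static void example_count_domain_cpus(struct sched_domain *sd)
{
	unsigned int cpu;

	/* cpumask_weight() counts the CPUs covered by the domain. */
	pr_debug("domain spans %u cpus\n",
		 cpumask_weight(sched_domain_span(sd)));

	for_each_cpu(cpu, sched_domain_span(sd))
		pr_debug("  cpu %u is in the span\n", cpu);
}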
@@ -926,7 +981,7 @@ struct sched_class {
 	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
 
 	void (*set_cpus_allowed)(struct task_struct *p,
-				 const cpumask_t *newmask);
+				 const struct cpumask *newmask);
 
 	void (*rq_online)(struct rq *rq);
 	void (*rq_offline)(struct rq *rq);
@@ -1578,12 +1633,12 @@ extern cputime_t task_gtime(struct task_struct *p);
 
 #ifdef CONFIG_SMP
 extern int set_cpus_allowed_ptr(struct task_struct *p,
-				const cpumask_t *new_mask);
+				const struct cpumask *new_mask);
 #else
 static inline int set_cpus_allowed_ptr(struct task_struct *p,
-				       const cpumask_t *new_mask)
+				       const struct cpumask *new_mask)
 {
-	if (!cpu_isset(0, *new_mask))
+	if (!cpumask_test_cpu(0, new_mask))
 		return -EINVAL;
 	return 0;
 }
@@ -1650,16 +1705,16 @@ extern void wake_up_idle_cpu(int cpu);
 static inline void wake_up_idle_cpu(int cpu) { }
 #endif
 
-#ifdef CONFIG_SCHED_DEBUG
 extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
+extern unsigned int sysctl_sched_shares_ratelimit;
+extern unsigned int sysctl_sched_shares_thresh;
+#ifdef CONFIG_SCHED_DEBUG
 extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
-extern unsigned int sysctl_sched_shares_ratelimit;
-extern unsigned int sysctl_sched_shares_thresh;
 
 int sched_nr_latency_handler(struct ctl_table *table, int write,
 		struct file *file, void __user *buffer, size_t *length,
@@ -2207,10 +2262,8 @@ __trace_special(void *__tr, void *__data,
 }
 #endif
 
-extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
-extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
-
-extern int sched_mc_power_savings, sched_smt_power_savings;
+extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
+extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
 extern void normalize_rt_tasks(void);
 