Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
 "The main scheduler changes in this cycle were:

   - support Intel Turbo Boost Max Technology 3.0 (TBM3) by introducing
     a notion of 'better cores', which the scheduler will prefer to
     schedule single threaded workloads on. (Tim Chen, Srinivas
     Pandruvada)

   - enhance the handling of asymmetric capacity CPUs further (Morten
     Rasmussen)

   - improve/fix load handling when moving tasks between task groups
     (Vincent Guittot)

   - simplify and clean up the cputime code (Stanislaw Gruszka)

   - improve mass fork()ed task spread a.k.a. hackbench speedup
     (Vincent Guittot)

   - make struct kthread kmalloc()ed and related fixes (Oleg Nesterov)

   - add uaccess atomicity debugging (when using access_ok() in the
     wrong context), under CONFIG_DEBUG_ATOMIC_SLEEP=y (Peter Zijlstra)

   - implement various fixes, cleanups and other enhancements (Daniel
     Bristot de Oliveira, Martin Schwidefsky, Rafael J. Wysocki)"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (41 commits)
  sched/core: Use load_avg for selecting idlest group
  sched/core: Fix find_idlest_group() for fork
  kthread: Don't abuse kthread_create_on_cpu() in __kthread_create_worker()
  kthread: Don't use to_live_kthread() in kthread_[un]park()
  kthread: Don't use to_live_kthread() in kthread_stop()
  Revert "kthread: Pin the stack via try_get_task_stack()/put_task_stack() in to_live_kthread() function"
  kthread: Make struct kthread kmalloc'ed
  x86/uaccess, sched/preempt: Verify access_ok() context
  sched/x86: Make CONFIG_SCHED_MC_PRIO=y easier to enable
  sched/x86: Change CONFIG_SCHED_ITMT to CONFIG_SCHED_MC_PRIO
  x86/sched: Use #include <linux/mutex.h> instead of #include <asm/mutex.h>
  cpufreq/intel_pstate: Use CPPC to get max performance
  acpi/bus: Set _OSC for diverse core support
  acpi/bus: Enable HWP CPPC objects
  x86/sched: Add SD_ASYM_PACKING flags to x86 ITMT CPU
  x86/sysctl: Add sysctl for ITMT scheduling feature
  x86: Enable Intel Turbo Boost Max Technology 3.0
  x86/topology: Define x86's arch_update_cpu_topology
  sched: Extend scheduler's asym packing
  sched/fair: Clean up the tunable parameter definitions
  ...
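To make the ordering rules concrete, here is a minimal, illustrative sketch of the wait/wake pattern that the rewritten sched.h comment in the diff below documents. The names sleeper(), waker(), sleeper_task and the need_sleep flag are hypothetical and not part of this series; only the set_current_state()/wake_up_state() pattern itself comes from the patch:

/*
 * Illustrative sketch only: sleeper(), waker(), sleeper_task and
 * need_sleep are made-up names.  The pattern is the one documented by
 * the updated comment above __set_current_state() in the diff below.
 */
#include <linux/sched.h>

static bool need_sleep = true;
static struct task_struct *sleeper_task;	/* assumed to be set elsewhere */

static void sleeper(void)
{
	for (;;) {
		/* store the state before testing the sleep condition */
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!need_sleep)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}

static void waker(void)
{
	need_sleep = false;				/* change the condition ...	*/
	wake_up_state(sleeper_task, TASK_UNINTERRUPTIBLE);	/* ... then wake; the wakeup
								 * primitive implies the ordering */
}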
include/linux/sched.h

@@ -262,20 +262,9 @@ extern char ___assert_task_state[1 - 2*!!(
 #define set_task_state(tsk, state_value)			\
 	do {							\
 		(tsk)->task_state_change = _THIS_IP_;		\
 		smp_store_mb((tsk)->state, (state_value));	\
 	} while (0)
 
-/*
- * set_current_state() includes a barrier so that the write of current->state
- * is correctly serialised wrt the caller's subsequent test of whether to
- * actually sleep:
- *
- *   set_current_state(TASK_UNINTERRUPTIBLE);
- *   if (do_i_need_to_sleep())
- *       schedule();
- *
- * If the caller does not need such serialisation then use __set_current_state()
- */
 #define __set_current_state(state_value)			\
 	do {							\
 		current->task_state_change = _THIS_IP_;		\
@@ -284,11 +273,19 @@ extern char ___assert_task_state[1 - 2*!!(
 #define set_current_state(state_value)				\
 	do {							\
 		current->task_state_change = _THIS_IP_;		\
 		smp_store_mb(current->state, (state_value));	\
 	} while (0)
 
 #else
 
+/*
+ * @tsk had better be current, or you get to keep the pieces.
+ *
+ * The only reason is that computing current can be more expensive than
+ * using a pointer that's already available.
+ *
+ * Therefore, see set_current_state().
+ */
 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
 #define set_task_state(tsk, state_value)		\
@@ -299,11 +296,34 @@ extern char ___assert_task_state[1 - 2*!!(
  * is correctly serialised wrt the caller's subsequent test of whether to
  * actually sleep:
  *
- *   set_current_state(TASK_UNINTERRUPTIBLE);
- *   if (do_i_need_to_sleep())
- *       schedule();
+ *   for (;;) {
+ *	set_current_state(TASK_UNINTERRUPTIBLE);
+ *	if (!need_sleep)
+ *		break;
  *
- * If the caller does not need such serialisation then use __set_current_state()
+ *	schedule();
+ *   }
+ *   __set_current_state(TASK_RUNNING);
+ *
+ * If the caller does not need such serialisation (because, for instance, the
+ * condition test and condition change and wakeup are under the same lock) then
+ * use __set_current_state().
+ *
+ * The above is typically ordered against the wakeup, which does:
+ *
+ *   need_sleep = false;
+ *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
+ *
+ * Where wake_up_state() (and all other wakeup primitives) imply enough
+ * barriers to order the store of the variable against wakeup.
+ *
+ * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
+ * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
+ * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
+ *
+ * This is obviously fine, since they both store the exact same value.
+ *
+ * Also see the comments of try_to_wake_up().
  */
 #define __set_current_state(state_value)		\
 	do { current->state = (state_value); } while (0)
@@ -1057,6 +1077,8 @@ static inline int cpu_numa_flags(void)
 }
 #endif
 
+extern int arch_asym_cpu_priority(int cpu);
+
 struct sched_domain_attr {
 	int relax_domain_level;
 };
@@ -1627,7 +1649,10 @@ struct task_struct {
 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
 
-	cputime_t utime, stime, utimescaled, stimescaled;
+	cputime_t utime, stime;
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
+	cputime_t utimescaled, stimescaled;
+#endif
 	cputime_t gtime;
 	struct prev_cputime prev_cputime;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
@@ -2220,27 +2245,13 @@ struct task_struct *try_get_task_struct(struct task_struct **ptask);
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 extern void task_cputime(struct task_struct *t,
 			 cputime_t *utime, cputime_t *stime);
-extern void task_cputime_scaled(struct task_struct *t,
-				cputime_t *utimescaled, cputime_t *stimescaled);
 extern cputime_t task_gtime(struct task_struct *t);
 #else
 static inline void task_cputime(struct task_struct *t,
 				cputime_t *utime, cputime_t *stime)
 {
-	if (utime)
-		*utime = t->utime;
-	if (stime)
-		*stime = t->stime;
-}
-
-static inline void task_cputime_scaled(struct task_struct *t,
-				       cputime_t *utimescaled,
-				       cputime_t *stimescaled)
-{
-	if (utimescaled)
-		*utimescaled = t->utimescaled;
-	if (stimescaled)
-		*stimescaled = t->stimescaled;
+	*utime = t->utime;
+	*stime = t->stime;
 }
 
 static inline cputime_t task_gtime(struct task_struct *t)
@@ -2248,6 +2259,24 @@ static inline cputime_t task_gtime(struct task_struct *t)
 	return t->gtime;
 }
 #endif
+
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
+static inline void task_cputime_scaled(struct task_struct *t,
+				       cputime_t *utimescaled,
+				       cputime_t *stimescaled)
+{
+	*utimescaled = t->utimescaled;
+	*stimescaled = t->stimescaled;
+}
+#else
+static inline void task_cputime_scaled(struct task_struct *t,
+				       cputime_t *utimescaled,
+				       cputime_t *stimescaled)
+{
+	task_cputime(t, utimescaled, stimescaled);
+}
+#endif
 
 extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
 extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);