Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Thomas Gleixner:

 - Cleanup and improvement of NUMA balancing

 - Refactoring and improvements to the PELT (Per Entity Load Tracking)
   code

 - Watchdog simplification and related cleanups

 - The usual pile of small incremental fixes and improvements

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (41 commits)
  watchdog: Reduce message verbosity
  stop_machine: Reflow cpu_stop_queue_two_works()
  sched/numa: Move task_numa_placement() closer to numa_migrate_preferred()
  sched/numa: Use group_weights to identify if migration degrades locality
  sched/numa: Update the scan period without holding the numa_group lock
  sched/numa: Remove numa_has_capacity()
  sched/numa: Modify migrate_swap() to accept additional parameters
  sched/numa: Remove unused task_capacity from 'struct numa_stats'
  sched/numa: Skip nodes that are at 'hoplimit'
  sched/debug: Reverse the order of printing faults
  sched/numa: Use task faults only if numa_group is not yet set up
  sched/numa: Set preferred_node based on best_cpu
  sched/numa: Simplify load_too_imbalanced()
  sched/numa: Evaluate move once per node
  sched/numa: Remove redundant field
  sched/debug: Show the sum wait time of a task group
  sched/fair: Remove #ifdefs from scale_rt_capacity()
  sched/core: Remove get_cpu() from sched_fork()
  sched/cpufreq: Clarify sugov_get_util()
  sched/sysctl: Remove unused sched_time_avg_ms sysctl
  ...
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -164,6 +164,7 @@ enum cpuhp_state {
 	CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
 	CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
 	CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
+	CPUHP_AP_WATCHDOG_ONLINE,
 	CPUHP_AP_WORKQUEUE_ONLINE,
 	CPUHP_AP_RCUTREE_ONLINE,
 	CPUHP_AP_ONLINE_DYN,
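CPU hotplug states are brought up in ascending enum order and torn down in reverse, so giving the watchdog its own fixed slot ahead of CPUHP_AP_WORKQUEUE_ONLINE makes its position in the bring-up sequence explicit. For comparison, a minimal sketch of a consumer that needs no fixed ordering and therefore uses a dynamic slot; the my_* names are hypothetical:

#include <linux/cpuhotplug.h>
#include <linux/init.h>

/* Hypothetical per-CPU facility; only the pattern matters here. */
static int my_cpu_online(unsigned int cpu)
{
	/* set up per-CPU state; runs on the CPU coming online */
	return 0;
}

static int my_cpu_offline(unsigned int cpu)
{
	/* tear down per-CPU state before the CPU goes away */
	return 0;
}

static int __init my_init(void)
{
	int ret;

	/* CPUHP_AP_ONLINE_DYN: the core allocates a free state slot.
	 * Fixed slots like CPUHP_AP_WATCHDOG_ONLINE are reserved in the
	 * enum precisely so that their ordering is guaranteed. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
				my_cpu_online, my_cpu_offline);
	return ret < 0 ? ret : 0;
}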
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -45,12 +45,18 @@ extern void touch_softlockup_watchdog(void);
 extern void touch_softlockup_watchdog_sync(void);
 extern void touch_all_softlockup_watchdogs(void);
 extern unsigned int softlockup_panic;
-#else
+
+extern int lockup_detector_online_cpu(unsigned int cpu);
+extern int lockup_detector_offline_cpu(unsigned int cpu);
+#else /* CONFIG_SOFTLOCKUP_DETECTOR */
 static inline void touch_softlockup_watchdog_sched(void) { }
 static inline void touch_softlockup_watchdog(void) { }
 static inline void touch_softlockup_watchdog_sync(void) { }
 static inline void touch_all_softlockup_watchdogs(void) { }
-#endif
+
+#define lockup_detector_online_cpu	NULL
+#define lockup_detector_offline_cpu	NULL
+#endif /* CONFIG_SOFTLOCKUP_DETECTOR */
 
 #ifdef CONFIG_DETECT_HUNG_TASK
 void reset_hung_task_detector(void);
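These two hooks replace the old smpboot-managed "watchdog/%u" threads; the softlockup watchdog is now driven by the CPU hotplug state machine. Note that the !CONFIG_SOFTLOCKUP_DETECTOR branch defines the callbacks to NULL rather than providing stubs: the hotplug core treats NULL callbacks as "nothing to do", so the static state table can reference them unconditionally. A sketch of how the fixed state is presumably wired up in kernel/cpu.c (the exact .name string is an assumption):

/* kernel/cpu.c, cpuhp_hp_states[] entry (sketch): NULL startup/teardown
 * callbacks are simply skipped by the hotplug core, so no #ifdef is
 * needed here when the softlockup detector is configured out. */
[CPUHP_AP_WATCHDOG_ONLINE] = {
	.name			= "lockup_detector:online",
	.startup.single		= lockup_detector_online_cpu,
	.teardown.single	= lockup_detector_offline_cpu,
},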
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1017,7 +1017,6 @@ struct task_struct {
 	u64				last_sum_exec_runtime;
 	struct callback_head		numa_work;
 
-	struct list_head		numa_entry;
 	struct numa_group		*numa_group;
 
 	/*
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -40,7 +40,6 @@ extern unsigned int sysctl_numa_balancing_scan_size;
 #ifdef CONFIG_SCHED_DEBUG
 extern __read_mostly unsigned int sysctl_sched_migration_cost;
 extern __read_mostly unsigned int sysctl_sched_nr_migrate;
-extern __read_mostly unsigned int sysctl_sched_time_avg;
 
 int sched_proc_update_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *length,
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -25,8 +25,6 @@ struct smpboot_thread_data;
  *			parked (cpu offline)
  * @unpark:		Optional unpark function, called when the thread is
  *			unparked (cpu online)
- * @cpumask:		Internal state.  To update which threads are unparked,
- *			call smpboot_update_cpumask_percpu_thread().
  * @selfparking:	Thread is not parked by the park function.
  * @thread_comm:	The base name of the thread
  */
@@ -40,23 +38,12 @@ struct smp_hotplug_thread {
 	void				(*cleanup)(unsigned int cpu, bool online);
 	void				(*park)(unsigned int cpu);
 	void				(*unpark)(unsigned int cpu);
-	cpumask_var_t			cpumask;
 	bool				selfparking;
 	const char			*thread_comm;
 };
 
-int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_thread,
-					   const struct cpumask *cpumask);
-
-static inline int
-smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
-{
-	return smpboot_register_percpu_thread_cpumask(plug_thread,
-						      cpu_possible_mask);
-}
+int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread);
 
 void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
-void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
-					  const struct cpumask *);
 
 #endif
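With the cpumask variants gone, smpboot threads are always created for every possible CPU and are parked/unparked purely by CPU hotplug; the softlockup watchdog, the last user of the update_cpumask API, now queues cpu_stop_work instead of running dedicated threads. A minimal registration sketch against the simplified API, modelled on existing users such as ksoftirqd; the my_* names are hypothetical:

#include <linux/smpboot.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct task_struct *, my_task);
static DEFINE_PER_CPU(bool, my_pending);

static int my_should_run(unsigned int cpu)
{
	return this_cpu_read(my_pending);
}

static void my_run(unsigned int cpu)
{
	this_cpu_write(my_pending, false);
	/* do the actual per-CPU work; called with preemption enabled */
}

static struct smp_hotplug_thread my_threads = {
	.store			= &my_task,
	.thread_should_run	= my_should_run,
	.thread_fn		= my_run,
	.thread_comm		= "my_worker/%u",
};

static int __init my_smpboot_init(void)
{
	/* one kthread per possible CPU; parking on offline is automatic */
	return smpboot_register_percpu_thread(&my_threads);
}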
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -16,7 +16,7 @@
  *    wait-queues, but the semantics are actually completely different, and
  *    every single user we have ever had has been buggy (or pointless).
  *
- *    A "swake_up()" only wakes up _one_ waiter, which is not at all what
+ *    A "swake_up_one()" only wakes up _one_ waiter, which is not at all what
  *    "wake_up()" does, and has led to problems. In other cases, it has
  *    been fine, because there's only ever one waiter (kvm), but in that
  *    case the whole "simple" wait-queue is just pointless to begin with,
@@ -38,8 +38,8 @@
  * all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
  * sleeper state.
  *
- * - the exclusive mode; because this requires preserving the list order
- *   and this is hard.
+ * - the !exclusive mode; because that leads to O(n) wakeups, everything is
+ *   exclusive.
  *
  * - custom wake callback functions; because you cannot give any guarantees
  *   about random code. This also allows swait to be used in RT, such that
@@ -115,7 +115,7 @@ extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name
  *		CPU0 - waker			CPU1 - waiter
  *
  *						for (;;) {
- *	@cond = true;				prepare_to_swait(&wq_head, &wait, state);
+ *	@cond = true;				prepare_to_swait_exclusive(&wq_head, &wait, state);
  *	smp_mb();				// smp_mb() from set_current_state()
  *	if (swait_active(wq_head))		if (@cond)
  *		wake_up(wq_head);			break;
@@ -157,20 +157,20 @@ static inline bool swq_has_sleeper(struct swait_queue_head *wq)
 	return swait_active(wq);
 }
 
-extern void swake_up(struct swait_queue_head *q);
+extern void swake_up_one(struct swait_queue_head *q);
 extern void swake_up_all(struct swait_queue_head *q);
 extern void swake_up_locked(struct swait_queue_head *q);
 
 extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
-extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
+extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state);
 extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);
 
 extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
 extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
 
-/* as per ___wait_event() but for swait, therefore "exclusive == 0" */
+/* as per ___wait_event() but for swait, therefore "exclusive == 1" */
 #define ___swait_event(wq, condition, state, ret, cmd)			\
 ({									\
 	__label__ __out;						\
 	struct swait_queue __wait;					\
 	long __ret = ret;						\
 									\
@@ -183,20 +183,20 @@ extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
 									\
 		if (___wait_is_interruptible(state) && __int) {		\
 			__ret = __int;					\
-			break;						\
+			goto __out;					\
 		}							\
 									\
 		cmd;							\
 	}								\
 	finish_swait(&wq, &__wait);					\
-	__ret;								\
+__out:	__ret;								\
 })
 
 #define __swait_event(wq, condition)					\
 	(void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0,	\
 			    schedule())
 
-#define swait_event(wq, condition)					\
+#define swait_event_exclusive(wq, condition)				\
 do {									\
 	if (condition)							\
 		break;							\
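The rename makes the semantics visible at the call site: swait only supports exclusive waits, so a wakeup pops exactly one waiter. A minimal wait/wake sketch with the new names (swait is meant for core code such as RCU and KVM; my_wq and my_ready are hypothetical):

#include <linux/swait.h>

static DECLARE_SWAIT_QUEUE_HEAD(my_wq);
static bool my_ready;

/* Waiter: sleeps in TASK_UNINTERRUPTIBLE until my_ready is true. */
static void my_wait(void)
{
	swait_event_exclusive(my_wq, READ_ONCE(my_ready));
}

/* Waker: publish the condition first, then wake exactly one waiter.
 * The queue lock taken by swake_up_one() and the state handling in
 * prepare_to_swait_event() provide the ordering sketched in the
 * CPU0/CPU1 comment earlier in this file. */
static void my_wake(void)
{
	WRITE_ONCE(my_ready, true);
	swake_up_one(&my_wq);
}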
@@ -208,7 +208,7 @@ do {									\
 			    TASK_UNINTERRUPTIBLE, timeout,		\
 			    __ret = schedule_timeout(__ret))
 
-#define swait_event_timeout(wq, condition, timeout)			\
+#define swait_event_timeout_exclusive(wq, condition, timeout)		\
 ({									\
 	long __ret = timeout;						\
 	if (!___wait_cond_timeout(condition))				\
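The timeout variant keeps the usual wait_event_timeout() return convention: 0 if the timeout elapsed with the condition still false, otherwise the remaining jiffies (at least 1). Continuing the sketch above (my_wq/my_ready hypothetical):

/* Wait up to one second for my_ready. */
static int my_wait_timeout(void)
{
	long left = swait_event_timeout_exclusive(my_wq,
						  READ_ONCE(my_ready), HZ);

	return left ? 0 : -ETIMEDOUT;	/* 0 jiffies left == timed out */
}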
@@ -220,7 +220,7 @@ do {									\
 	___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0,		\
 		       schedule())
 
-#define swait_event_interruptible(wq, condition)			\
+#define swait_event_interruptible_exclusive(wq, condition)		\
 ({									\
 	int __ret = 0;							\
 	if (!(condition))						\
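The interruptible variant returns 0 once the condition is true, or -ERESTARTSYS if a signal arrived first. In the signal case prepare_to_swait_event() has already removed the waiter from the queue under the queue lock, which is why ___swait_event() above can exit through the __out label without calling finish_swait(). Continuing the sketch:

static int my_wait_interruptible(void)
{
	/* 0: condition became true; -ERESTARTSYS: signal pending and the
	 * waiter was already dequeued, so no finish_swait() is needed. */
	return swait_event_interruptible_exclusive(my_wq,
						   READ_ONCE(my_ready));
}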
@@ -233,7 +233,7 @@ do {									\
 		       TASK_INTERRUPTIBLE, timeout,			\
 		       __ret = schedule_timeout(__ret))
 
-#define swait_event_interruptible_timeout(wq, condition, timeout)	\
+#define swait_event_interruptible_timeout_exclusive(wq, condition, timeout)\
 ({									\
 	long __ret = timeout;						\
 	if (!___wait_cond_timeout(condition))				\
@@ -246,7 +246,7 @@ do {									\
 	(void)___swait_event(wq, condition, TASK_IDLE, 0, schedule())
 
 /**
- * swait_event_idle - wait without system load contribution
+ * swait_event_idle_exclusive - wait without system load contribution
  * @wq: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  *
@@ -257,7 +257,7 @@ do {									\
  * condition and doesn't want to contribute to system load. Signals are
  * ignored.
  */
-#define swait_event_idle(wq, condition)					\
+#define swait_event_idle_exclusive(wq, condition)			\
 do {									\
 	if (condition)							\
 		break;							\
@@ -270,7 +270,7 @@ do {									\
 			    __ret = schedule_timeout(__ret))
 
 /**
- * swait_event_idle_timeout - wait up to timeout without load contribution
+ * swait_event_idle_timeout_exclusive - wait up to timeout without load contribution
  * @wq: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  * @timeout: timeout at which we'll give up in jiffies
@@ -288,7 +288,7 @@ do {									\
  * or the remaining jiffies (at least 1) if the @condition evaluated
  * to %true before the @timeout elapsed.
  */
-#define swait_event_idle_timeout(wq, condition, timeout)		\
+#define swait_event_idle_timeout_exclusive(wq, condition, timeout)	\
 ({									\
 	long __ret = timeout;						\
 	if (!___wait_cond_timeout(condition))				\
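The TASK_IDLE variants let long-sleeping housekeeping kthreads wait without contributing to the load average; signals are ignored. A final sketch in the same vein (my_* names hypothetical):

#include <linux/kthread.h>

static int my_housekeeper(void *unused)
{
	while (!kthread_should_stop()) {
		/* idle wait: does not inflate loadavg; kthread_stop()'s
		 * wake_up_process() forces a recheck of the condition */
		swait_event_idle_exclusive(my_wq,
				READ_ONCE(my_ready) || kthread_should_stop());
		if (kthread_should_stop())
			break;
		WRITE_ONCE(my_ready, false);
		/* ... periodic housekeeping work ... */
	}
	return 0;
}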