Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Thomas Gleixner:

 - Cleanup and improvement of NUMA balancing

 - Refactoring and improvements to the PELT (Per Entity Load Tracking) code

 - Watchdog simplification and related cleanups

 - The usual pile of small incremental fixes and improvements

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (41 commits)
  watchdog: Reduce message verbosity
  stop_machine: Reflow cpu_stop_queue_two_works()
  sched/numa: Move task_numa_placement() closer to numa_migrate_preferred()
  sched/numa: Use group_weights to identify if migration degrades locality
  sched/numa: Update the scan period without holding the numa_group lock
  sched/numa: Remove numa_has_capacity()
  sched/numa: Modify migrate_swap() to accept additional parameters
  sched/numa: Remove unused task_capacity from 'struct numa_stats'
  sched/numa: Skip nodes that are at 'hoplimit'
  sched/debug: Reverse the order of printing faults
  sched/numa: Use task faults only if numa_group is not yet set up
  sched/numa: Set preferred_node based on best_cpu
  sched/numa: Simplify load_too_imbalanced()
  sched/numa: Evaluate move once per node
  sched/numa: Remove redundant field
  sched/debug: Show the sum wait time of a task group
  sched/fair: Remove #ifdefs from scale_rt_capacity()
  sched/core: Remove get_cpu() from sched_fork()
  sched/cpufreq: Clarify sugov_get_util()
  sched/sysctl: Remove unused sched_time_avg_ms sysctl
  ...
@@ -110,7 +110,7 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
         WRITE_ONCE(sp->srcu_lock_nesting[idx], newval);
         if (!newval && READ_ONCE(sp->srcu_gp_waiting))
-                swake_up(&sp->srcu_wq);
+                swake_up_one(&sp->srcu_wq);
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
@@ -140,7 +140,7 @@ void srcu_drive_gp(struct work_struct *wp)
         idx = sp->srcu_idx;
         WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx);
         WRITE_ONCE(sp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
-        swait_event(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
+        swait_event_exclusive(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
         WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
 
         /* Invoke the callbacks we removed above. */
@@ -1701,7 +1701,7 @@ static void rcu_gp_kthread_wake(struct rcu_state *rsp)
             !READ_ONCE(rsp->gp_flags) ||
             !rsp->gp_kthread)
                 return;
-        swake_up(&rsp->gp_wq);
+        swake_up_one(&rsp->gp_wq);
 }
 
 /*
@@ -2015,7 +2015,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 }
 
 /*
- * Helper function for swait_event_idle() wakeup at force-quiescent-state
+ * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
  * time.
  */
 static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
@@ -2163,7 +2163,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
                                        READ_ONCE(rsp->gp_seq),
                                        TPS("reqwait"));
                         rsp->gp_state = RCU_GP_WAIT_GPS;
-                        swait_event_idle(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
+                        swait_event_idle_exclusive(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
                                                      RCU_GP_FLAG_INIT);
                         rsp->gp_state = RCU_GP_DONE_GPS;
                         /* Locking provides needed memory barrier. */
@@ -2191,7 +2191,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
                                        READ_ONCE(rsp->gp_seq),
                                        TPS("fqswait"));
                         rsp->gp_state = RCU_GP_WAIT_FQS;
-                        ret = swait_event_idle_timeout(rsp->gp_wq,
+                        ret = swait_event_idle_timeout_exclusive(rsp->gp_wq,
                                         rcu_gp_fqs_check_wake(rsp, &gf), j);
                         rsp->gp_state = RCU_GP_DOING_FQS;
                         /* Locking provides needed memory barriers. */
@@ -212,7 +212,7 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                         if (wake) {
                                 smp_mb(); /* EGP done before wake_up(). */
-                                swake_up(&rsp->expedited_wq);
+                                swake_up_one(&rsp->expedited_wq);
                         }
                         break;
                 }
@@ -526,7 +526,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
         jiffies_start = jiffies;
 
         for (;;) {
-                ret = swait_event_timeout(
+                ret = swait_event_timeout_exclusive(
                                 rsp->expedited_wq,
                                 sync_rcu_preempt_exp_done_unlocked(rnp_root),
                                 jiffies_stall);
@@ -1926,8 +1926,8 @@ static void __wake_nocb_leader(struct rcu_data *rdp, bool force,
                 WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
                 del_timer(&rdp->nocb_timer);
                 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
-                smp_mb(); /* ->nocb_leader_sleep before swake_up(). */
-                swake_up(&rdp_leader->nocb_wq);
+                smp_mb(); /* ->nocb_leader_sleep before swake_up_one(). */
+                swake_up_one(&rdp_leader->nocb_wq);
         } else {
                 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
         }
@@ -2159,7 +2159,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
          */
         trace_rcu_this_gp(rnp, rdp, c, TPS("StartWait"));
         for (;;) {
-                swait_event_interruptible(
+                swait_event_interruptible_exclusive(
                         rnp->nocb_gp_wq[rcu_seq_ctr(c) & 0x1],
                         (d = rcu_seq_done(&rnp->gp_seq, c)));
                 if (likely(d))
@@ -2188,7 +2188,7 @@ wait_again:
         /* Wait for callbacks to appear. */
         if (!rcu_nocb_poll) {
                 trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Sleep"));
-                swait_event_interruptible(my_rdp->nocb_wq,
+                swait_event_interruptible_exclusive(my_rdp->nocb_wq,
                                 !READ_ONCE(my_rdp->nocb_leader_sleep));
                 raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags);
                 my_rdp->nocb_leader_sleep = true;
@@ -2253,7 +2253,7 @@ wait_again:
                 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
                 if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
                         /* List was empty, so wake up the follower. */
-                        swake_up(&rdp->nocb_wq);
+                        swake_up_one(&rdp->nocb_wq);
                 }
         }
 
@@ -2270,7 +2270,7 @@ static void nocb_follower_wait(struct rcu_data *rdp)
 {
         for (;;) {
                 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("FollowerSleep"));
-                swait_event_interruptible(rdp->nocb_wq,
+                swait_event_interruptible_exclusive(rdp->nocb_wq,
                                 READ_ONCE(rdp->nocb_follower_head));
                 if (smp_load_acquire(&rdp->nocb_follower_head)) {
                         /* ^^^ Ensure CB invocation follows _head test. */
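Taken together, the hunks above replace the non-exclusive swait calls (swait_event(), swait_event_interruptible(), swait_event_timeout(), swait_event_idle(), swake_up()) with their _exclusive/_one counterparts. As a rough illustration of the resulting pairing, here is a minimal sketch of a waiter and a waker using the renamed API from <linux/swait.h>; the queue my_wq, the flag my_done, and the two functions are made-up names for this example and are not part of the commit:

#include <linux/swait.h>
#include <linux/compiler.h>     /* READ_ONCE()/WRITE_ONCE() */

/* Hypothetical wait state, for illustration only. */
static DECLARE_SWAIT_QUEUE_HEAD(my_wq);
static bool my_done;

/* Waiter side: sleep until my_done is observed true. */
static void my_waiter(void)
{
        /* Exclusive wait: each swake_up_one() releases at most one waiter. */
        swait_event_exclusive(my_wq, READ_ONCE(my_done));
}

/* Waker side: publish the condition, then wake a single waiter. */
static void my_waker(void)
{
        WRITE_ONCE(my_done, true);
        swake_up_one(&my_wq);   /* was swake_up() before this series */
}

Callers that genuinely need to wake every waiter would use swake_up_all() instead; the call sites touched in this diff only ever need a single wakeup, which is what the _exclusive naming makes explicit.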