Merge e4cbce4d13 ("Merge tag 'sched-core-2020-08-03' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip") into android-mainline

Baby steps for 5.9-rc1

Resolves some kernel/sched/ merge issues.

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I88cf5411ac7251f9795d9c50cb18b0df5bf0bcd6
Greg Kroah-Hartman
2020-08-07 14:17:39 +02:00
48 changed files with 1522 additions and 332 deletions

include/linux/sched.h

@@ -155,24 +155,24 @@ struct task_group;
  *
  *   for (;;) {
  *     set_current_state(TASK_UNINTERRUPTIBLE);
- *     if (!need_sleep)
- *       break;
+ *     if (CONDITION)
+ *       break;
  *
  *     schedule();
  *   }
  *   __set_current_state(TASK_RUNNING);
  *
  * If the caller does not need such serialisation (because, for instance, the
- * condition test and condition change and wakeup are under the same lock) then
+ * CONDITION test and condition change and wakeup are under the same lock) then
  * use __set_current_state().
  *
  * The above is typically ordered against the wakeup, which does:
  *
- *   need_sleep = false;
+ *   CONDITION = 1;
  *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
  *
- * where wake_up_state() executes a full memory barrier before accessing the
- * task state.
+ * where wake_up_state()/try_to_wake_up() executes a full memory barrier before
+ * accessing p->state.
  *
  * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
  * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
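
For reference, a minimal sketch of the pattern this comment documents, assuming a hypothetical driver with its own condition flag and a cached pointer to the sleeping task (my_condition, waiter, my_wait() and my_wake() are illustrative names, not kernel APIs):

#include <linux/sched.h>

static bool my_condition;		/* plays the role of CONDITION (assumed) */
static struct task_struct *waiter;	/* sleeper, recorded elsewhere (assumed) */

/*
 * Sleeper: set_current_state() orders the TASK_UNINTERRUPTIBLE store
 * against the subsequent condition load, so a wakeup that sets the
 * condition concurrently cannot be missed.
 */
static void my_wait(void)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(my_condition))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}

/*
 * Waker: the condition store precedes wake_up_state(), which issues a
 * full memory barrier before it reads p->state, mirroring the ordering
 * described above.
 */
static void my_wake(void)
{
	WRITE_ONCE(my_condition, true);
	wake_up_state(waiter, TASK_UNINTERRUPTIBLE);
}

Either the sleeper observes the condition and breaks out, or the waker observes TASK_UNINTERRUPTIBLE and puts the task back on the runqueue; the paired barriers rule out the lost-wakeup case.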
@@ -375,7 +375,7 @@ struct util_est {
  * For cfs_rq, they are the aggregated values of all runnable and blocked
  * sched_entities.
  *
- * The load/runnable/util_avg doesn't direcly factor frequency scaling and CPU
+ * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU
  * capacity scaling. The scaling is done through the rq_clock_pelt that is used
  * for computing those signals (see update_rq_clock_pelt())
  *
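
The scaling mentioned here can be illustrated with a toy userspace program (an approximation of the idea, not the kernel's actual update_rq_clock_pelt(); the helper name scale_delta() and the sample numbers are made up). Elapsed time is shrunk by the current frequency and by the CPU's capacity, both expressed against the kernel's 1024-based SCHED_CAPACITY_SCALE, before it feeds the running averages, which is what makes the signals comparable across CPUs and frequencies:

#include <stdio.h>
#include <stdint.h>

#define SCHED_CAPACITY_SCALE 1024	/* the kernel's fixed-point unit */

/* Shrink a runtime delta by frequency and CPU capacity, both given
 * as fractions of SCHED_CAPACITY_SCALE. */
static uint64_t scale_delta(uint64_t delta_ns, unsigned int freq_cap,
			    unsigned int cpu_cap)
{
	delta_ns = delta_ns * freq_cap / SCHED_CAPACITY_SCALE;
	delta_ns = delta_ns * cpu_cap / SCHED_CAPACITY_SCALE;
	return delta_ns;
}

int main(void)
{
	/* 1 ms on a capacity-512 CPU running at half speed accrues
	 * like ~0.25 ms of big-core, full-speed time. */
	printf("%llu ns\n",
	       (unsigned long long)scale_delta(1000000, 512, 512));
	return 0;
}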
@@ -687,9 +687,15 @@ struct task_struct {
 	struct sched_dl_entity		dl;
 
 #ifdef CONFIG_UCLAMP_TASK
-	/* Clamp values requested for a scheduling entity */
+	/*
+	 * Clamp values requested for a scheduling entity.
+	 * Must be updated with task_rq_lock() held.
+	 */
 	struct uclamp_se		uclamp_req[UCLAMP_CNT];
-	/* Effective clamp values used for a scheduling entity */
+	/*
+	 * Effective clamp values used for a scheduling entity.
+	 * Must be updated with task_rq_lock() held.
+	 */
 	struct uclamp_se		uclamp[UCLAMP_CNT];
 #endif
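
The requested clamps in uclamp_req[] are normally filled in from userspace through sched_setattr(2) with the SCHED_FLAG_UTIL_CLAMP_MIN/MAX flags. A self-contained sketch; glibc has no wrapper, so the raw syscall is used, the struct mirrors the uapi sched_attr layout, and the clamp values 256/768 are arbitrary example numbers:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Flag values from include/uapi/linux/sched.h */
#define SCHED_FLAG_KEEP_POLICY		0x08
#define SCHED_FLAG_KEEP_PARAMS		0x10
#define SCHED_FLAG_UTIL_CLAMP_MIN	0x20
#define SCHED_FLAG_UTIL_CLAMP_MAX	0x40

struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;	/* SCHED_DEADLINE fields, unused here */
	uint64_t sched_deadline;
	uint64_t sched_period;
	uint32_t sched_util_min;
	uint32_t sched_util_max;
};

int main(void)
{
	struct sched_attr attr = {
		.size		= sizeof(attr),
		/* Keep policy and params, change only the clamps. */
		.sched_flags	= SCHED_FLAG_KEEP_POLICY |
				  SCHED_FLAG_KEEP_PARAMS |
				  SCHED_FLAG_UTIL_CLAMP_MIN |
				  SCHED_FLAG_UTIL_CLAMP_MAX,
		.sched_util_min	= 256,	/* boost to >= 25% of 1024 */
		.sched_util_max	= 768,	/* cap to <= 75% of 1024 */
	};

	if (syscall(SYS_sched_setattr, 0, &attr, 0))	/* 0 == current task */
		perror("sched_setattr");
	return 0;
}

The effective values in uclamp[] are then derived by the kernel from the request, the cgroup (cpu.uclamp.*) limits, and the system-wide defaults.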
@@ -2043,6 +2049,7 @@ const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
 const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);
 
 int sched_trace_rq_cpu(struct rq *rq);
+int sched_trace_rq_nr_running(struct rq *rq);
 
 const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
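
sched_trace_rq_nr_running() exists for out-of-tree tracepoint consumers, which cannot dereference struct rq themselves because its definition is private to kernel/sched/. A sketch of a hypothetical module that attaches to the sched_update_nr_running_tp bare tracepoint and reads the runqueue through the exported accessors (illustrative only, error handling trimmed):

#include <linux/module.h>
#include <linux/sched.h>
#include <trace/events/sched.h>

/* Probe signature: the void *data cookie first, then the TP_PROTO args. */
static void nr_running_probe(void *data, struct rq *rq, int change)
{
	trace_printk("cpu=%d nr_running=%d change=%d\n",
		     sched_trace_rq_cpu(rq),
		     sched_trace_rq_nr_running(rq), change);
}

static int __init probe_init(void)
{
	return register_trace_sched_update_nr_running_tp(nr_running_probe,
							 NULL);
}

static void __exit probe_exit(void)
{
	unregister_trace_sched_update_nr_running_tp(nr_running_probe, NULL);
	tracepoint_synchronize_unregister();	/* wait for in-flight probes */
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");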