sched/fair: Force gold cpus to do idle lb when silver has big tasks

We often see traces where gold CPUs go idle even when silver
CPUs have big tasks. This is because the avg_idle check does
not allow the gold CPUs to enter idle (newidle) balance. This
is NOT okay when silver CPUs have big tasks.

Change-Id: Ice00d43c70be105b5bc3ad09caa6047aa98d7402
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
Signed-off-by: Runmin Wang <runminw@codeaurora.org>
[satyap@codeaurora.org: port to 5.4 and add WALT checks]
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
This commit is contained in:
Pavankumar Kondeti
2019-04-02 10:48:54 -07:00
committed by Satya Durga Srinivasu Prabhala
parent 24a21204c9
commit ac0eba9b32

View File

@@ -11121,6 +11121,27 @@ static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle
static inline void nohz_newidle_balance(struct rq *this_rq) { }
#endif /* CONFIG_NO_HZ_COMMON */
#ifdef CONFIG_SCHED_WALT
/*
 * silver_has_big_tasks - report whether any min-capacity (silver) CPU
 * currently has big tasks queued, per WALT's per-rq accounting.
 *
 * Walks possible CPUs in enumeration order and bails out at the first
 * CPU that is not min-capacity; this assumes silver CPUs are numbered
 * before the bigger clusters (NOTE(review): confirm against platform
 * topology — true on the SoCs this patch targets).
 */
static bool silver_has_big_tasks(void)
{
	int i;

	for_each_possible_cpu(i) {
		/* Past the silver cluster: nothing left to inspect. */
		if (!is_min_capacity_cpu(i))
			return false;
		if (cpu_rq(i)->walt_stats.nr_big_tasks)
			return true;
	}

	return false;
}
#else
/* Without WALT there is no big-task accounting, so report none. */
static inline bool silver_has_big_tasks(void)
{
	return false;
}
#endif
/*
* idle_balance is called by schedule() if this_cpu is about to become
* idle. Attempts to pull tasks from other CPUs.
@@ -11132,6 +11153,7 @@ int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
struct sched_domain *sd;
int pulled_task = 0;
u64 curr_cost = 0;
u64 avg_idle = this_rq->avg_idle;
if (cpu_isolated(this_cpu))
return 0;
@@ -11148,7 +11170,8 @@ int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
*/
if (!cpu_active(this_cpu))
return 0;
if (!is_min_capacity_cpu(this_cpu) && silver_has_big_tasks())
avg_idle = ULLONG_MAX;
/*
* This is OK, because current is on_cpu, which avoids it being picked
* for load-balance and preemption/IRQs are still disabled avoiding
@@ -11157,7 +11180,7 @@ int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
*/
rq_unpin_lock(this_rq, rf);
if (this_rq->avg_idle < sysctl_sched_migration_cost ||
if (avg_idle < sysctl_sched_migration_cost ||
!READ_ONCE(this_rq->rd->overload)) {
rcu_read_lock();
@@ -11182,7 +11205,7 @@ int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
if (!(sd->flags & SD_LOAD_BALANCE))
continue;
if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
if (avg_idle < curr_cost + sd->max_newidle_lb_cost) {
update_next_balance(sd, &next_balance);
break;
}