BACKPORT: FROMLIST: cpuset: Honour task_cpu_possible_mask() in guarantee_online_cpus()
Asymmetric systems may not offer the same level of userspace ISA support
across all CPUs, meaning that some applications cannot be executed by
some CPUs. As a concrete example, upcoming arm64 big.LITTLE designs do
not feature support for 32-bit applications on both clusters.

Modify guarantee_online_cpus() to take task_cpu_possible_mask() into
account when trying to find a suitable set of online CPUs for a given
task. This will avoid passing an invalid mask to set_cpus_allowed_ptr()
during ->attach() and will subsequently allow the cpuset hierarchy to be
taken into account when forcefully overriding the affinity mask for a
task which requires migration to a compatible CPU.

Cc: Li Zefan <lizefan@huawei.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Will Deacon <will@kernel.org>

Bug: 178507149
Link: https://lore.kernel.org/linux-arch/20201208132835.6151-9-will@kernel.org/
[will: Fixed conflict due to active_mask being used instead of online_mask]
Signed-off-by: Will Deacon <willdeacon@google.com>
Change-Id: I5b4a50e7a257af928dccd87f1dbd961ea26ff834
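For background, the task_cpu_possible_mask() hook used throughout this patch
lives in <linux/mmu_context.h> (hence the new include in cpuset.h) and was
introduced earlier in this series; architectures that do not override it fall
back to cpu_possible_mask. Below is a minimal sketch of what an asymmetric
override amounts to. The helper names (is_compat_thread(),
system_32bit_el0_cpumask()) are borrowed from the arm64 patches in the same
series, not from this change:

/*
 * Generic fallback, for sane symmetric systems (<linux/mmu_context.h>):
 *
 *   #ifndef task_cpu_possible_mask
 *   # define task_cpu_possible_mask(p)   cpu_possible_mask
 *   #endif
 *
 * Sketch of an asymmetric arm64 override: 64-bit tasks may run on any CPU,
 * while 32-bit (compat) tasks are confined to the CPUs implementing
 * 32-bit EL0.
 */
static inline const struct cpumask *
task_cpu_possible_mask(struct task_struct *p)
{
        if (!is_compat_thread(task_thread_info(p)))
                return cpu_possible_mask;

        return system_32bit_el0_cpumask();
}

Whatever mask the architecture returns, guarantee_online_cpus() below can now
rely on it to avoid offering the task a CPU it is incapable of running on.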
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -15,6 +15,7 @@
 #include <linux/cpumask.h>
 #include <linux/nodemask.h>
 #include <linux/mm.h>
+#include <linux/mmu_context.h>
 #include <linux/jump_label.h>
 
 #ifdef CONFIG_CPUSETS
@@ -181,7 +182,7 @@ static inline void cpuset_wait_for_hotplug(void) { }
 static inline void cpuset_cpus_allowed(struct task_struct *p,
                                        struct cpumask *mask)
 {
-        cpumask_copy(mask, cpu_possible_mask);
+        cpumask_copy(mask, task_cpu_possible_mask(p));
 }
 
 static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -364,18 +364,26 @@ static inline bool is_in_v2_mode(void)
 }
 
 /*
- * Return in pmask the portion of a cpusets's cpus_allowed that
- * are online. If none are online, walk up the cpuset hierarchy
- * until we find one that does have some online cpus.
+ * Return in pmask the portion of a task's cpusets's cpus_allowed that
+ * are online and are capable of running the task. If none are found,
+ * walk up the cpuset hierarchy until we find one that does have some
+ * appropriate cpus.
  *
  * One way or another, we guarantee to return some non-empty subset
  * of cpu_active_mask.
  *
  * Call with callback_lock or cpuset_mutex held.
  */
-static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
+static void guarantee_online_cpus(struct task_struct *tsk,
+                                  struct cpumask *pmask)
 {
-        while (!cpumask_intersects(cs->effective_cpus, cpu_active_mask)) {
+        struct cpuset *cs = task_cs(tsk);
+        const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
+
+        if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_active_mask)))
+                cpumask_copy(pmask, cpu_active_mask);
+
+        while (!cpumask_intersects(cs->effective_cpus, pmask)) {
                 cs = parent_cs(cs);
                 if (unlikely(!cs)) {
                         /*
@@ -385,11 +393,10 @@ static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
                          * cpuset's effective_cpus is on its way to be
                          * identical to cpu_online_mask.
                          */
-                        cpumask_copy(pmask, cpu_active_mask);
                         return;
                 }
         }
-        cpumask_and(pmask, cs->effective_cpus, cpu_active_mask);
+        cpumask_and(pmask, pmask, cs->effective_cpus);
 }
 
 /*
@@ -2211,15 +2218,13 @@ static void cpuset_attach(struct cgroup_taskset *tset)
 
         mutex_lock(&cpuset_mutex);
 
-        /* prepare for attach */
-        if (cs == &top_cpuset)
-                cpumask_copy(cpus_attach, cpu_possible_mask);
-        else
-                guarantee_online_cpus(cs, cpus_attach);
-
         guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
 
         cgroup_taskset_for_each(task, css, tset) {
+                if (cs != &top_cpuset)
+                        guarantee_online_cpus(task, cpus_attach);
+                else
+                        cpumask_copy(cpus_attach, task_cpu_possible_mask(task));
                 /*
                  * can_attach beforehand should guarantee that this doesn't
                  * fail. TODO: have a better way to handle failure here
@@ -3317,7 +3322,7 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 
         spin_lock_irqsave(&callback_lock, flags);
         rcu_read_lock();
-        guarantee_online_cpus(task_cs(tsk), pmask);
+        guarantee_online_cpus(tsk, pmask);
         rcu_read_unlock();
         spin_unlock_irqrestore(&callback_lock, flags);
 }