cgroup: Fix threadgroup_rwsem <-> cpus_read_lock() deadlock

[ Upstream commit 4f7e7236435ca0abe005c674ebd6892c6e83aeb3 ]

Bringing up a CPU may involve creating and destroying tasks which requires
read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside
cpus_read_lock(). However, cpuset's ->attach(), which may be called with
threadgroup_rwsem write-locked, also wants to disable CPU hotplug and
acquires cpus_read_lock(), leading to a deadlock.
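
Spelled out as a timeline (an illustrative sketch, not a trace; "create
per-CPU task" stands for any task creation during bring-up):

  cpu_up()                             cgroup migration path
  --------                             ---------------------
  cpus_write_lock()
                                       percpu_down_write(&cgroup_threadgroup_rwsem)
  create per-CPU task
    percpu_down_read(&cgroup_threadgroup_rwsem)
    ... blocks on the writer
                                       cpuset_attach()
                                         cpus_read_lock()
                                         ... blocks on hotplug => deadlock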

Fix it by guaranteeing that ->attach() is always called with CPU hotplug
disabled and removing the cpus_read_lock() call from cpuset_attach().
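
With the fix, every migration path takes the two locks through a single
pair of helpers, so the order can never invert (a sketch of the pattern
enforced by the cgroup_attach_lock()/cgroup_attach_unlock() helpers
added below; the call sites are abbreviated):

	cgroup_attach_lock(lock_threadgroup);
		/* cpus_read_lock(), then optional percpu_down_write() */
	...	/* ->attach() may now assume hotplug is disabled */
	cgroup_attach_unlock(lock_threadgroup);
		/* optional percpu_up_write(), then cpus_read_unlock() */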

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-and-tested-by: Imran Khan <imran.f.khan@oracle.com>
Reported-and-tested-by: Xuewen Yan <xuewen.yan@unisoc.com>
Fixes: 05c7b7a92cc8 ("cgroup/cpuset: Fix a race between cpuset_attach() and cpu hotplug")
Cc: stable@vger.kernel.org # v5.17+
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 2 files changed, 55 insertions(+), 25 deletions(-)

diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c

@@ -2304,6 +2304,47 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
 }
 EXPORT_SYMBOL_GPL(task_cgroup_path);
 
+/**
+ * cgroup_attach_lock - Lock for ->attach()
+ * @lock_threadgroup: whether to down_write cgroup_threadgroup_rwsem
+ *
+ * cgroup migration sometimes needs to stabilize threadgroups against forks and
+ * exits by write-locking cgroup_threadgroup_rwsem. However, some ->attach()
+ * implementations (e.g. cpuset), also need to disable CPU hotplug.
+ * Unfortunately, letting ->attach() operations acquire cpus_read_lock() can
+ * lead to deadlocks.
+ *
+ * Bringing up a CPU may involve creating and destroying tasks which requires
+ * read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside
+ * cpus_read_lock(). If we call an ->attach() which acquires the cpus lock while
+ * write-locking threadgroup_rwsem, the locking order is reversed and we end up
+ * waiting for an on-going CPU hotplug operation which in turn is waiting for
+ * the threadgroup_rwsem to be released to create new tasks. For more details:
+ *
+ * http://lkml.kernel.org/r/20220711174629.uehfmqegcwn2lqzu@wubuntu
+ *
+ * Resolve the situation by always acquiring cpus_read_lock() before optionally
+ * write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume that
+ * CPU hotplug is disabled on entry.
+ */
+static void cgroup_attach_lock(bool lock_threadgroup)
+{
+	cpus_read_lock();
+	if (lock_threadgroup)
+		percpu_down_write(&cgroup_threadgroup_rwsem);
+}
+
+/**
+ * cgroup_attach_unlock - Undo cgroup_attach_lock()
+ * @lock_threadgroup: whether to up_write cgroup_threadgroup_rwsem
+ */
+static void cgroup_attach_unlock(bool lock_threadgroup)
+{
+	if (lock_threadgroup)
+		percpu_up_write(&cgroup_threadgroup_rwsem);
+	cpus_read_unlock();
+}
+
 /**
  * cgroup_migrate_add_task - add a migration target task to a migration context
  * @task: target task
@@ -2780,8 +2821,7 @@ int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
 }
 
 struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
-					     bool *locked)
-	__acquires(&cgroup_threadgroup_rwsem)
+					     bool *threadgroup_locked)
 {
 	struct task_struct *tsk;
 	pid_t pid;
@@ -2798,12 +2838,8 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
 	 * Therefore, we can skip the global lock.
 	 */
 	lockdep_assert_held(&cgroup_mutex);
-	if (pid || threadgroup) {
-		percpu_down_write(&cgroup_threadgroup_rwsem);
-		*locked = true;
-	} else {
-		*locked = false;
-	}
+	*threadgroup_locked = pid || threadgroup;
+	cgroup_attach_lock(*threadgroup_locked);
 
 	rcu_read_lock();
 	if (pid) {
@@ -2834,17 +2870,14 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
 		goto out_unlock_rcu;
 
 out_unlock_threadgroup:
-	if (*locked) {
-		percpu_up_write(&cgroup_threadgroup_rwsem);
-		*locked = false;
-	}
+	cgroup_attach_unlock(*threadgroup_locked);
+	*threadgroup_locked = false;
 out_unlock_rcu:
 	rcu_read_unlock();
 	return tsk;
 }
 
-void cgroup_procs_write_finish(struct task_struct *task, bool locked)
-	__releases(&cgroup_threadgroup_rwsem)
+void cgroup_procs_write_finish(struct task_struct *task, bool threadgroup_locked)
 {
 	struct cgroup_subsys *ss;
 	int ssid;
@@ -2852,8 +2885,8 @@ void cgroup_procs_write_finish(struct task_struct *task, bool locked)
 	/* release reference from cgroup_procs_write_start() */
 	put_task_struct(task);
 
-	if (locked)
-		percpu_up_write(&cgroup_threadgroup_rwsem);
+	cgroup_attach_unlock(threadgroup_locked);
+
 	for_each_subsys(ss, ssid)
 		if (ss->post_attach)
 			ss->post_attach();
@@ -2930,8 +2963,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
 	 * write-locking can be skipped safely.
 	 */
 	has_tasks = !list_empty(&mgctx.preloaded_src_csets);
-	if (has_tasks)
-		percpu_down_write(&cgroup_threadgroup_rwsem);
+	cgroup_attach_lock(has_tasks);
 
 	/* NULL dst indicates self on default hierarchy */
 	ret = cgroup_migrate_prepare_dst(&mgctx);
@@ -2952,8 +2984,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
 
 	ret = cgroup_migrate_execute(&mgctx);
 out_finish:
 	cgroup_migrate_finish(&mgctx);
-	if (has_tasks)
-		percpu_up_write(&cgroup_threadgroup_rwsem);
+	cgroup_attach_unlock(has_tasks);
 	return ret;
 }
@@ -4809,13 +4840,13 @@ static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
 	struct task_struct *task;
 	const struct cred *saved_cred;
 	ssize_t ret;
-	bool locked;
+	bool threadgroup_locked;
 
 	dst_cgrp = cgroup_kn_lock_live(of->kn, false);
 	if (!dst_cgrp)
 		return -ENODEV;
 
-	task = cgroup_procs_write_start(buf, true, &locked);
+	task = cgroup_procs_write_start(buf, true, &threadgroup_locked);
 	ret = PTR_ERR_OR_ZERO(task);
 	if (ret)
 		goto out_unlock;
@@ -4841,7 +4872,7 @@ static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
 	ret = cgroup_attach_task(dst_cgrp, task, true);
 
 out_finish:
-	cgroup_procs_write_finish(task, locked);
+	cgroup_procs_write_finish(task, threadgroup_locked);
 out_unlock:
 	cgroup_kn_unlock(of->kn);
 

diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c

@@ -2212,7 +2212,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
 	cgroup_taskset_first(tset, &css);
 	cs = css_cs(css);
 
-	cpus_read_lock();
+	lockdep_assert_cpus_held();	/* see cgroup_attach_lock() */
 	percpu_down_write(&cpuset_rwsem);
 
 	/* prepare for attach */
@@ -2268,7 +2268,6 @@ static void cpuset_attach(struct cgroup_taskset *tset)
 	wake_up(&cpuset_attach_wq);
 
 	percpu_up_write(&cpuset_rwsem);
-	cpus_read_unlock();
 }
 
 /* The various types of files and directories in a cpuset file system */