Merge branch 'for-3.3' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup
* 'for-3.3' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup: (21 commits)
cgroup: fix to allow mounting a hierarchy by name
cgroup: move assignement out of condition in cgroup_attach_proc()
cgroup: Remove task_lock() from cgroup_post_fork()
cgroup: add sparse annotation to cgroup_iter_start() and cgroup_iter_end()
cgroup: mark cgroup_rmdir_waitq and cgroup_attach_proc() as static
cgroup: only need to check oldcgrp==newgrp once
cgroup: remove redundant get/put of task struct
cgroup: remove redundant get/put of old css_set from migrate
cgroup: Remove unnecessary task_lock before fetching css_set on migration
cgroup: Drop task_lock(parent) on cgroup_fork()
cgroups: remove redundant get/put of css_set from css_set_check_fetched()
resource cgroups: remove bogus cast
cgroup: kill subsys->can_attach_task(), pre_attach() and attach_task()
cgroup, cpuset: don't use ss->pre_attach()
cgroup: don't use subsys->can_attach_task() or ->attach_task()
cgroup: introduce cgroup_taskset and use it in subsys->can_attach(), cancel_attach() and attach()
cgroup: improve old cgroup handling in cgroup_attach_proc()
cgroup: always lock threadgroup during migration
threadgroup: extend threadgroup_lock() to cover exit and exec
threadgroup: rename signal->threadgroup_fork_lock to ->group_rwsem
...
Fix up conflict in kernel/cgroup.c due to commit e0197aae59
: "cgroups:
fix a css_set not found bug in cgroup_attach_proc" that already
mentioned that the bug is fixed (differently) in Tejun's cgroup
patchset. This one, in other words.
This commit is contained in:
@@ -637,13 +637,15 @@ struct signal_struct {
|
||||
#endif
|
||||
#ifdef CONFIG_CGROUPS
|
||||
/*
|
||||
* The threadgroup_fork_lock prevents threads from forking with
|
||||
* CLONE_THREAD while held for writing. Use this for fork-sensitive
|
||||
* threadgroup-wide operations. It's taken for reading in fork.c in
|
||||
* copy_process().
|
||||
* Currently only needed write-side by cgroups.
|
||||
* group_rwsem prevents new tasks from entering the threadgroup and
|
||||
* member tasks from exiting, more specifically, setting of
|
||||
* PF_EXITING. fork and exit paths are protected with this rwsem
|
||||
* using threadgroup_change_begin/end(). Users which require
|
||||
* threadgroup to remain stable should use threadgroup_[un]lock()
|
||||
* which also takes care of exec path. Currently, cgroup is the
|
||||
* only user.
|
||||
*/
|
||||
struct rw_semaphore threadgroup_fork_lock;
|
||||
struct rw_semaphore group_rwsem;
|
||||
#endif
|
||||
|
||||
int oom_adj; /* OOM kill score adjustment (bit shift) */
|
||||
@@ -2394,29 +2396,62 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
|
||||
spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
|
||||
}
|
||||
|
||||
/* See the declaration of threadgroup_fork_lock in signal_struct. */
|
||||
#ifdef CONFIG_CGROUPS
|
||||
static inline void threadgroup_fork_read_lock(struct task_struct *tsk)
|
||||
static inline void threadgroup_change_begin(struct task_struct *tsk)
|
||||
{
|
||||
down_read(&tsk->signal->threadgroup_fork_lock);
|
||||
down_read(&tsk->signal->group_rwsem);
|
||||
}
|
||||
static inline void threadgroup_fork_read_unlock(struct task_struct *tsk)
|
||||
static inline void threadgroup_change_end(struct task_struct *tsk)
|
||||
{
|
||||
up_read(&tsk->signal->threadgroup_fork_lock);
|
||||
up_read(&tsk->signal->group_rwsem);
|
||||
}
|
||||
static inline void threadgroup_fork_write_lock(struct task_struct *tsk)
|
||||
|
||||
/**
|
||||
* threadgroup_lock - lock threadgroup
|
||||
* @tsk: member task of the threadgroup to lock
|
||||
*
|
||||
* Lock the threadgroup @tsk belongs to. No new task is allowed to enter
|
||||
* and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
|
||||
* perform exec. This is useful for cases where the threadgroup needs to
|
||||
* stay stable across blockable operations.
|
||||
*
|
||||
* fork and exit paths explicitly call threadgroup_change_{begin|end}() for
|
||||
* synchronization. While held, no new task will be added to threadgroup
|
||||
* and no existing live task will have its PF_EXITING set.
|
||||
*
|
||||
* During exec, a task goes and puts its thread group through unusual
|
||||
* changes. After de-threading, exclusive access is assumed to resources
|
||||
* which are usually shared by tasks in the same group - e.g. sighand may
|
||||
* be replaced with a new one. Also, the exec'ing task takes over group
|
||||
* leader role including its pid. Exclude these changes while locked by
|
||||
* grabbing cred_guard_mutex which is used to synchronize exec path.
|
||||
*/
|
||||
static inline void threadgroup_lock(struct task_struct *tsk)
|
||||
{
|
||||
down_write(&tsk->signal->threadgroup_fork_lock);
|
||||
/*
|
||||
* exec uses exit for de-threading nesting group_rwsem inside
|
||||
* cred_guard_mutex. Grab cred_guard_mutex first.
|
||||
*/
|
||||
mutex_lock(&tsk->signal->cred_guard_mutex);
|
||||
down_write(&tsk->signal->group_rwsem);
|
||||
}
|
||||
static inline void threadgroup_fork_write_unlock(struct task_struct *tsk)
|
||||
|
||||
/**
|
||||
* threadgroup_unlock - unlock threadgroup
|
||||
* @tsk: member task of the threadgroup to unlock
|
||||
*
|
||||
* Reverse threadgroup_lock().
|
||||
*/
|
||||
static inline void threadgroup_unlock(struct task_struct *tsk)
|
||||
{
|
||||
up_write(&tsk->signal->threadgroup_fork_lock);
|
||||
up_write(&tsk->signal->group_rwsem);
|
||||
mutex_unlock(&tsk->signal->cred_guard_mutex);
|
||||
}
|
||||
#else
|
||||
static inline void threadgroup_fork_read_lock(struct task_struct *tsk) {}
|
||||
static inline void threadgroup_fork_read_unlock(struct task_struct *tsk) {}
|
||||
static inline void threadgroup_fork_write_lock(struct task_struct *tsk) {}
|
||||
static inline void threadgroup_fork_write_unlock(struct task_struct *tsk) {}
|
||||
static inline void threadgroup_change_begin(struct task_struct *tsk) {}
|
||||
static inline void threadgroup_change_end(struct task_struct *tsk) {}
|
||||
static inline void threadgroup_lock(struct task_struct *tsk) {}
|
||||
static inline void threadgroup_unlock(struct task_struct *tsk) {}
|
||||
#endif
|
||||
|
||||
#ifndef __HAVE_THREAD_FUNCTIONS
|
||||
|
Reference in New Issue
Block a user