Merge branch 'for-4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup
Pull cgroup updates from Tejun Heo: "Several noteworthy changes. - Parav's rdma controller is finally merged. It is very straight forward and can limit the absolute numbers of common rdma constructs used by different cgroups. - kernel/cgroup.c got too chubby and disorganized. Created kernel/cgroup/ subdirectory and moved all cgroup related files under kernel/ there and reorganized the core code. This hurts for backporting patches but was long overdue. - cgroup v2 process listing reimplemented so that it no longer depends on allocating a buffer large enough to cache the entire result to sort and uniq the output. v2 has always mangled the sort order to ensure that users don't depend on the sorted output, so this shouldn't surprise anybody. This makes the pid listing functions use the same iterators that are used internally, which have to have the same iterating capabilities anyway. - perf cgroup filtering now works automatically on cgroup v2. This patch was posted a long time ago but somehow fell through the cracks. - misc fixes and documentation updates" * 'for-4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup: (27 commits) kernfs: fix locking around kernfs_ops->release() callback cgroup: drop the matching uid requirement on migration for cgroup v2 cgroup, perf_event: make perf_event controller work on cgroup2 hierarchy cgroup: misc cleanups cgroup: call subsys->*attach() only for subsystems which are actually affected by migration cgroup: track migration context in cgroup_mgctx cgroup: cosmetic update to cgroup_taskset_add() rdmacg: Fixed uninitialized current resource usage cgroup: Add missing cgroup-v2 PID controller documentation. 
rdmacg: Added documentation for rdmacg IB/core: added support to use rdma cgroup controller rdmacg: Added rdma cgroup controller cgroup: fix a comment typo cgroup: fix RCU related sparse warnings cgroup: move namespace code to kernel/cgroup/namespace.c cgroup: rename functions for consistency cgroup: move v1 mount functions to kernel/cgroup/cgroup-v1.c cgroup: separate out cgroup1_kf_syscall_ops cgroup: refactor mount path and clearly distinguish v1 and v2 paths cgroup: move cgroup v1 specific code to kernel/cgroup/cgroup-v1.c ...
This commit is contained in:
@@ -64,10 +64,7 @@ obj-$(CONFIG_KEXEC) += kexec.o
|
||||
obj-$(CONFIG_KEXEC_FILE) += kexec_file.o
|
||||
obj-$(CONFIG_BACKTRACE_SELF_TEST) += backtracetest.o
|
||||
obj-$(CONFIG_COMPAT) += compat.o
|
||||
obj-$(CONFIG_CGROUPS) += cgroup.o
|
||||
obj-$(CONFIG_CGROUP_FREEZER) += cgroup_freezer.o
|
||||
obj-$(CONFIG_CGROUP_PIDS) += cgroup_pids.o
|
||||
obj-$(CONFIG_CPUSETS) += cpuset.o
|
||||
obj-$(CONFIG_CGROUPS) += cgroup/
|
||||
obj-$(CONFIG_UTS_NS) += utsname.o
|
||||
obj-$(CONFIG_USER_NS) += user_namespace.o
|
||||
obj-$(CONFIG_PID_NS) += pid_namespace.o
|
||||
|
6
kernel/cgroup/Makefile
Normal file
6
kernel/cgroup/Makefile
Normal file
@@ -0,0 +1,6 @@
|
||||
obj-y := cgroup.o namespace.o cgroup-v1.o
|
||||
|
||||
obj-$(CONFIG_CGROUP_FREEZER) += freezer.o
|
||||
obj-$(CONFIG_CGROUP_PIDS) += pids.o
|
||||
obj-$(CONFIG_CGROUP_RDMA) += rdma.o
|
||||
obj-$(CONFIG_CPUSETS) += cpuset.o
|
214
kernel/cgroup/cgroup-internal.h
Normal file
214
kernel/cgroup/cgroup-internal.h
Normal file
@@ -0,0 +1,214 @@
|
||||
#ifndef __CGROUP_INTERNAL_H
|
||||
#define __CGROUP_INTERNAL_H
|
||||
|
||||
#include <linux/cgroup.h>
|
||||
#include <linux/kernfs.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/list.h>
|
||||
|
||||
/*
|
||||
* A cgroup can be associated with multiple css_sets as different tasks may
|
||||
* belong to different cgroups on different hierarchies. In the other
|
||||
* direction, a css_set is naturally associated with multiple cgroups.
|
||||
* This M:N relationship is represented by the following link structure
|
||||
* which exists for each association and allows traversing the associations
|
||||
* from both sides.
|
||||
*/
|
||||
struct cgrp_cset_link {
|
||||
/* the cgroup and css_set this link associates */
|
||||
struct cgroup *cgrp;
|
||||
struct css_set *cset;
|
||||
|
||||
/* list of cgrp_cset_links anchored at cgrp->cset_links */
|
||||
struct list_head cset_link;
|
||||
|
||||
/* list of cgrp_cset_links anchored at css_set->cgrp_links */
|
||||
struct list_head cgrp_link;
|
||||
};
|
||||
|
||||
/* used to track tasks and csets during migration */
|
||||
struct cgroup_taskset {
|
||||
/* the src and dst cset list running through cset->mg_node */
|
||||
struct list_head src_csets;
|
||||
struct list_head dst_csets;
|
||||
|
||||
/* the subsys currently being processed */
|
||||
int ssid;
|
||||
|
||||
/*
|
||||
* Fields for cgroup_taskset_*() iteration.
|
||||
*
|
||||
* Before migration is committed, the target migration tasks are on
|
||||
* ->mg_tasks of the csets on ->src_csets. After, on ->mg_tasks of
|
||||
* the csets on ->dst_csets. ->csets point to either ->src_csets
|
||||
* or ->dst_csets depending on whether migration is committed.
|
||||
*
|
||||
* ->cur_csets and ->cur_task point to the current task position
|
||||
* during iteration.
|
||||
*/
|
||||
struct list_head *csets;
|
||||
struct css_set *cur_cset;
|
||||
struct task_struct *cur_task;
|
||||
};
|
||||
|
||||
/* migration context also tracks preloading */
|
||||
struct cgroup_mgctx {
|
||||
/*
|
||||
* Preloaded source and destination csets. Used to guarantee
|
||||
* atomic success or failure on actual migration.
|
||||
*/
|
||||
struct list_head preloaded_src_csets;
|
||||
struct list_head preloaded_dst_csets;
|
||||
|
||||
/* tasks and csets to migrate */
|
||||
struct cgroup_taskset tset;
|
||||
|
||||
/* subsystems affected by migration */
|
||||
u16 ss_mask;
|
||||
};
|
||||
|
||||
#define CGROUP_TASKSET_INIT(tset) \
|
||||
{ \
|
||||
.src_csets = LIST_HEAD_INIT(tset.src_csets), \
|
||||
.dst_csets = LIST_HEAD_INIT(tset.dst_csets), \
|
||||
.csets = &tset.src_csets, \
|
||||
}
|
||||
|
||||
#define CGROUP_MGCTX_INIT(name) \
|
||||
{ \
|
||||
LIST_HEAD_INIT(name.preloaded_src_csets), \
|
||||
LIST_HEAD_INIT(name.preloaded_dst_csets), \
|
||||
CGROUP_TASKSET_INIT(name.tset), \
|
||||
}
|
||||
|
||||
#define DEFINE_CGROUP_MGCTX(name) \
|
||||
struct cgroup_mgctx name = CGROUP_MGCTX_INIT(name)
|
||||
|
||||
struct cgroup_sb_opts {
|
||||
u16 subsys_mask;
|
||||
unsigned int flags;
|
||||
char *release_agent;
|
||||
bool cpuset_clone_children;
|
||||
char *name;
|
||||
/* User explicitly requested empty subsystem */
|
||||
bool none;
|
||||
};
|
||||
|
||||
extern struct mutex cgroup_mutex;
|
||||
extern spinlock_t css_set_lock;
|
||||
extern struct cgroup_subsys *cgroup_subsys[];
|
||||
extern struct list_head cgroup_roots;
|
||||
extern struct file_system_type cgroup_fs_type;
|
||||
|
||||
/* iterate across the hierarchies */
|
||||
#define for_each_root(root) \
|
||||
list_for_each_entry((root), &cgroup_roots, root_list)
|
||||
|
||||
/**
|
||||
* for_each_subsys - iterate all enabled cgroup subsystems
|
||||
* @ss: the iteration cursor
|
||||
* @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
|
||||
*/
|
||||
#define for_each_subsys(ss, ssid) \
|
||||
for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT && \
|
||||
(((ss) = cgroup_subsys[ssid]) || true); (ssid)++)
|
||||
|
||||
static inline bool cgroup_is_dead(const struct cgroup *cgrp)
|
||||
{
|
||||
return !(cgrp->self.flags & CSS_ONLINE);
|
||||
}
|
||||
|
||||
static inline bool notify_on_release(const struct cgroup *cgrp)
|
||||
{
|
||||
return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
|
||||
}
|
||||
|
||||
void put_css_set_locked(struct css_set *cset);
|
||||
|
||||
static inline void put_css_set(struct css_set *cset)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
/*
|
||||
* Ensure that the refcount doesn't hit zero while any readers
|
||||
* can see it. Similar to atomic_dec_and_lock(), but for an
|
||||
* rwlock
|
||||
*/
|
||||
if (atomic_add_unless(&cset->refcount, -1, 1))
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&css_set_lock, flags);
|
||||
put_css_set_locked(cset);
|
||||
spin_unlock_irqrestore(&css_set_lock, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* refcounted get/put for css_set objects
|
||||
*/
|
||||
static inline void get_css_set(struct css_set *cset)
|
||||
{
|
||||
atomic_inc(&cset->refcount);
|
||||
}
|
||||
|
||||
bool cgroup_ssid_enabled(int ssid);
|
||||
bool cgroup_on_dfl(const struct cgroup *cgrp);
|
||||
|
||||
struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root);
|
||||
struct cgroup *task_cgroup_from_root(struct task_struct *task,
|
||||
struct cgroup_root *root);
|
||||
struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, bool drain_offline);
|
||||
void cgroup_kn_unlock(struct kernfs_node *kn);
|
||||
int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
|
||||
struct cgroup_namespace *ns);
|
||||
|
||||
void cgroup_free_root(struct cgroup_root *root);
|
||||
void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts);
|
||||
int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask);
|
||||
int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask);
|
||||
struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags,
|
||||
struct cgroup_root *root, unsigned long magic,
|
||||
struct cgroup_namespace *ns);
|
||||
|
||||
bool cgroup_may_migrate_to(struct cgroup *dst_cgrp);
|
||||
void cgroup_migrate_finish(struct cgroup_mgctx *mgctx);
|
||||
void cgroup_migrate_add_src(struct css_set *src_cset, struct cgroup *dst_cgrp,
|
||||
struct cgroup_mgctx *mgctx);
|
||||
int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx);
|
||||
int cgroup_migrate(struct task_struct *leader, bool threadgroup,
|
||||
struct cgroup_mgctx *mgctx);
|
||||
|
||||
int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
|
||||
bool threadgroup);
|
||||
ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
|
||||
size_t nbytes, loff_t off, bool threadgroup);
|
||||
ssize_t cgroup_procs_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
|
||||
loff_t off);
|
||||
|
||||
void cgroup_lock_and_drain_offline(struct cgroup *cgrp);
|
||||
|
||||
int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode);
|
||||
int cgroup_rmdir(struct kernfs_node *kn);
|
||||
int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
|
||||
struct kernfs_root *kf_root);
|
||||
|
||||
/*
|
||||
* namespace.c
|
||||
*/
|
||||
extern const struct proc_ns_operations cgroupns_operations;
|
||||
|
||||
/*
|
||||
* cgroup-v1.c
|
||||
*/
|
||||
extern struct cftype cgroup1_base_files[];
|
||||
extern const struct file_operations proc_cgroupstats_operations;
|
||||
extern struct kernfs_syscall_ops cgroup1_kf_syscall_ops;
|
||||
|
||||
bool cgroup1_ssid_disabled(int ssid);
|
||||
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp);
|
||||
void cgroup1_release_agent(struct work_struct *work);
|
||||
void cgroup1_check_for_release(struct cgroup *cgrp);
|
||||
struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
|
||||
void *data, unsigned long magic,
|
||||
struct cgroup_namespace *ns);
|
||||
|
||||
#endif /* __CGROUP_INTERNAL_H */
|
1395
kernel/cgroup/cgroup-v1.c
Normal file
1395
kernel/cgroup/cgroup-v1.c
Normal file
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
155
kernel/cgroup/namespace.c
Normal file
155
kernel/cgroup/namespace.c
Normal file
@@ -0,0 +1,155 @@
|
||||
#include "cgroup-internal.h"
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/nsproxy.h>
|
||||
#include <linux/proc_ns.h>
|
||||
|
||||
|
||||
/* cgroup namespaces */
|
||||
|
||||
static struct ucounts *inc_cgroup_namespaces(struct user_namespace *ns)
|
||||
{
|
||||
return inc_ucount(ns, current_euid(), UCOUNT_CGROUP_NAMESPACES);
|
||||
}
|
||||
|
||||
static void dec_cgroup_namespaces(struct ucounts *ucounts)
|
||||
{
|
||||
dec_ucount(ucounts, UCOUNT_CGROUP_NAMESPACES);
|
||||
}
|
||||
|
||||
static struct cgroup_namespace *alloc_cgroup_ns(void)
|
||||
{
|
||||
struct cgroup_namespace *new_ns;
|
||||
int ret;
|
||||
|
||||
new_ns = kzalloc(sizeof(struct cgroup_namespace), GFP_KERNEL);
|
||||
if (!new_ns)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
ret = ns_alloc_inum(&new_ns->ns);
|
||||
if (ret) {
|
||||
kfree(new_ns);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
atomic_set(&new_ns->count, 1);
|
||||
new_ns->ns.ops = &cgroupns_operations;
|
||||
return new_ns;
|
||||
}
|
||||
|
||||
void free_cgroup_ns(struct cgroup_namespace *ns)
|
||||
{
|
||||
put_css_set(ns->root_cset);
|
||||
dec_cgroup_namespaces(ns->ucounts);
|
||||
put_user_ns(ns->user_ns);
|
||||
ns_free_inum(&ns->ns);
|
||||
kfree(ns);
|
||||
}
|
||||
EXPORT_SYMBOL(free_cgroup_ns);
|
||||
|
||||
struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
|
||||
struct user_namespace *user_ns,
|
||||
struct cgroup_namespace *old_ns)
|
||||
{
|
||||
struct cgroup_namespace *new_ns;
|
||||
struct ucounts *ucounts;
|
||||
struct css_set *cset;
|
||||
|
||||
BUG_ON(!old_ns);
|
||||
|
||||
if (!(flags & CLONE_NEWCGROUP)) {
|
||||
get_cgroup_ns(old_ns);
|
||||
return old_ns;
|
||||
}
|
||||
|
||||
/* Allow only sysadmin to create cgroup namespace. */
|
||||
if (!ns_capable(user_ns, CAP_SYS_ADMIN))
|
||||
return ERR_PTR(-EPERM);
|
||||
|
||||
ucounts = inc_cgroup_namespaces(user_ns);
|
||||
if (!ucounts)
|
||||
return ERR_PTR(-ENOSPC);
|
||||
|
||||
/* It is not safe to take cgroup_mutex here */
|
||||
spin_lock_irq(&css_set_lock);
|
||||
cset = task_css_set(current);
|
||||
get_css_set(cset);
|
||||
spin_unlock_irq(&css_set_lock);
|
||||
|
||||
new_ns = alloc_cgroup_ns();
|
||||
if (IS_ERR(new_ns)) {
|
||||
put_css_set(cset);
|
||||
dec_cgroup_namespaces(ucounts);
|
||||
return new_ns;
|
||||
}
|
||||
|
||||
new_ns->user_ns = get_user_ns(user_ns);
|
||||
new_ns->ucounts = ucounts;
|
||||
new_ns->root_cset = cset;
|
||||
|
||||
return new_ns;
|
||||
}
|
||||
|
||||
static inline struct cgroup_namespace *to_cg_ns(struct ns_common *ns)
|
||||
{
|
||||
return container_of(ns, struct cgroup_namespace, ns);
|
||||
}
|
||||
|
||||
static int cgroupns_install(struct nsproxy *nsproxy, struct ns_common *ns)
|
||||
{
|
||||
struct cgroup_namespace *cgroup_ns = to_cg_ns(ns);
|
||||
|
||||
if (!ns_capable(current_user_ns(), CAP_SYS_ADMIN) ||
|
||||
!ns_capable(cgroup_ns->user_ns, CAP_SYS_ADMIN))
|
||||
return -EPERM;
|
||||
|
||||
/* Don't need to do anything if we are attaching to our own cgroupns. */
|
||||
if (cgroup_ns == nsproxy->cgroup_ns)
|
||||
return 0;
|
||||
|
||||
get_cgroup_ns(cgroup_ns);
|
||||
put_cgroup_ns(nsproxy->cgroup_ns);
|
||||
nsproxy->cgroup_ns = cgroup_ns;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct ns_common *cgroupns_get(struct task_struct *task)
|
||||
{
|
||||
struct cgroup_namespace *ns = NULL;
|
||||
struct nsproxy *nsproxy;
|
||||
|
||||
task_lock(task);
|
||||
nsproxy = task->nsproxy;
|
||||
if (nsproxy) {
|
||||
ns = nsproxy->cgroup_ns;
|
||||
get_cgroup_ns(ns);
|
||||
}
|
||||
task_unlock(task);
|
||||
|
||||
return ns ? &ns->ns : NULL;
|
||||
}
|
||||
|
||||
static void cgroupns_put(struct ns_common *ns)
|
||||
{
|
||||
put_cgroup_ns(to_cg_ns(ns));
|
||||
}
|
||||
|
||||
static struct user_namespace *cgroupns_owner(struct ns_common *ns)
|
||||
{
|
||||
return to_cg_ns(ns)->user_ns;
|
||||
}
|
||||
|
||||
const struct proc_ns_operations cgroupns_operations = {
|
||||
.name = "cgroup",
|
||||
.type = CLONE_NEWCGROUP,
|
||||
.get = cgroupns_get,
|
||||
.put = cgroupns_put,
|
||||
.install = cgroupns_install,
|
||||
.owner = cgroupns_owner,
|
||||
};
|
||||
|
||||
static __init int cgroup_namespaces_init(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
subsys_initcall(cgroup_namespaces_init);
|
619
kernel/cgroup/rdma.c
Normal file
619
kernel/cgroup/rdma.c
Normal file
@@ -0,0 +1,619 @@
|
||||
/*
|
||||
* RDMA resource limiting controller for cgroups.
|
||||
*
|
||||
* Used to allow a cgroup hierarchy to stop processes from consuming
|
||||
* additional RDMA resources after a certain limit is reached.
|
||||
*
|
||||
* Copyright (C) 2016 Parav Pandit <pandit.parav@gmail.com>
|
||||
*
|
||||
* This file is subject to the terms and conditions of version 2 of the GNU
|
||||
* General Public License. See the file COPYING in the main directory of the
|
||||
* Linux distribution for more details.
|
||||
*/
|
||||
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/cgroup.h>
|
||||
#include <linux/parser.h>
|
||||
#include <linux/cgroup_rdma.h>
|
||||
|
||||
#define RDMACG_MAX_STR "max"
|
||||
|
||||
/*
|
||||
* Protects list of resource pools maintained on per cgroup basis
|
||||
* and rdma device list.
|
||||
*/
|
||||
static DEFINE_MUTEX(rdmacg_mutex);
|
||||
static LIST_HEAD(rdmacg_devices);
|
||||
|
||||
enum rdmacg_file_type {
|
||||
RDMACG_RESOURCE_TYPE_MAX,
|
||||
RDMACG_RESOURCE_TYPE_STAT,
|
||||
};
|
||||
|
||||
/*
|
||||
* resource table definition as to be seen by the user.
|
||||
* Need to add entries to it when more resources are
|
||||
* added/defined at IB verb/core layer.
|
||||
*/
|
||||
static char const *rdmacg_resource_names[] = {
|
||||
[RDMACG_RESOURCE_HCA_HANDLE] = "hca_handle",
|
||||
[RDMACG_RESOURCE_HCA_OBJECT] = "hca_object",
|
||||
};
|
||||
|
||||
/* resource tracker for each resource of rdma cgroup */
|
||||
struct rdmacg_resource {
|
||||
int max;
|
||||
int usage;
|
||||
};
|
||||
|
||||
/*
|
||||
* resource pool object which represents per cgroup, per device
|
||||
* resources. There are multiple instances of this object per cgroup,
|
||||
* therefore it cannot be embedded within rdma_cgroup structure. It
|
||||
* is maintained as list.
|
||||
*/
|
||||
struct rdmacg_resource_pool {
|
||||
struct rdmacg_device *device;
|
||||
struct rdmacg_resource resources[RDMACG_RESOURCE_MAX];
|
||||
|
||||
struct list_head cg_node;
|
||||
struct list_head dev_node;
|
||||
|
||||
/* count active user tasks of this pool */
|
||||
u64 usage_sum;
|
||||
/* total number counts which are set to max */
|
||||
int num_max_cnt;
|
||||
};
|
||||
|
||||
static struct rdma_cgroup *css_rdmacg(struct cgroup_subsys_state *css)
|
||||
{
|
||||
return container_of(css, struct rdma_cgroup, css);
|
||||
}
|
||||
|
||||
static struct rdma_cgroup *parent_rdmacg(struct rdma_cgroup *cg)
|
||||
{
|
||||
return css_rdmacg(cg->css.parent);
|
||||
}
|
||||
|
||||
static inline struct rdma_cgroup *get_current_rdmacg(void)
|
||||
{
|
||||
return css_rdmacg(task_get_css(current, rdma_cgrp_id));
|
||||
}
|
||||
|
||||
static void set_resource_limit(struct rdmacg_resource_pool *rpool,
|
||||
int index, int new_max)
|
||||
{
|
||||
if (new_max == S32_MAX) {
|
||||
if (rpool->resources[index].max != S32_MAX)
|
||||
rpool->num_max_cnt++;
|
||||
} else {
|
||||
if (rpool->resources[index].max == S32_MAX)
|
||||
rpool->num_max_cnt--;
|
||||
}
|
||||
rpool->resources[index].max = new_max;
|
||||
}
|
||||
|
||||
static void set_all_resource_max_limit(struct rdmacg_resource_pool *rpool)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < RDMACG_RESOURCE_MAX; i++)
|
||||
set_resource_limit(rpool, i, S32_MAX);
|
||||
}
|
||||
|
||||
static void free_cg_rpool_locked(struct rdmacg_resource_pool *rpool)
|
||||
{
|
||||
lockdep_assert_held(&rdmacg_mutex);
|
||||
|
||||
list_del(&rpool->cg_node);
|
||||
list_del(&rpool->dev_node);
|
||||
kfree(rpool);
|
||||
}
|
||||
|
||||
static struct rdmacg_resource_pool *
|
||||
find_cg_rpool_locked(struct rdma_cgroup *cg,
|
||||
struct rdmacg_device *device)
|
||||
|
||||
{
|
||||
struct rdmacg_resource_pool *pool;
|
||||
|
||||
lockdep_assert_held(&rdmacg_mutex);
|
||||
|
||||
list_for_each_entry(pool, &cg->rpools, cg_node)
|
||||
if (pool->device == device)
|
||||
return pool;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct rdmacg_resource_pool *
|
||||
get_cg_rpool_locked(struct rdma_cgroup *cg, struct rdmacg_device *device)
|
||||
{
|
||||
struct rdmacg_resource_pool *rpool;
|
||||
|
||||
rpool = find_cg_rpool_locked(cg, device);
|
||||
if (rpool)
|
||||
return rpool;
|
||||
|
||||
rpool = kzalloc(sizeof(*rpool), GFP_KERNEL);
|
||||
if (!rpool)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
rpool->device = device;
|
||||
set_all_resource_max_limit(rpool);
|
||||
|
||||
INIT_LIST_HEAD(&rpool->cg_node);
|
||||
INIT_LIST_HEAD(&rpool->dev_node);
|
||||
list_add_tail(&rpool->cg_node, &cg->rpools);
|
||||
list_add_tail(&rpool->dev_node, &device->rpools);
|
||||
return rpool;
|
||||
}
|
||||
|
||||
/**
|
||||
* uncharge_cg_locked - uncharge resource for rdma cgroup
|
||||
* @cg: pointer to cg to uncharge and all parents in hierarchy
|
||||
* @device: pointer to rdmacg device
|
||||
* @index: index of the resource to uncharge in cg (resource pool)
|
||||
*
|
||||
* It also frees the resource pool which was created as part of
|
||||
* charging operation when there are no resources attached to
|
||||
* resource pool.
|
||||
*/
|
||||
static void
|
||||
uncharge_cg_locked(struct rdma_cgroup *cg,
|
||||
struct rdmacg_device *device,
|
||||
enum rdmacg_resource_type index)
|
||||
{
|
||||
struct rdmacg_resource_pool *rpool;
|
||||
|
||||
rpool = find_cg_rpool_locked(cg, device);
|
||||
|
||||
/*
|
||||
* rpool cannot be null at this stage. Let kernel operate in case
|
||||
* if there a bug in IB stack or rdma controller, instead of crashing
|
||||
* the system.
|
||||
*/
|
||||
if (unlikely(!rpool)) {
|
||||
pr_warn("Invalid device %p or rdma cgroup %p\n", cg, device);
|
||||
return;
|
||||
}
|
||||
|
||||
rpool->resources[index].usage--;
|
||||
|
||||
/*
|
||||
* A negative count (or overflow) is invalid,
|
||||
* it indicates a bug in the rdma controller.
|
||||
*/
|
||||
WARN_ON_ONCE(rpool->resources[index].usage < 0);
|
||||
rpool->usage_sum--;
|
||||
if (rpool->usage_sum == 0 &&
|
||||
rpool->num_max_cnt == RDMACG_RESOURCE_MAX) {
|
||||
/*
|
||||
* No user of the rpool and all entries are set to max, so
|
||||
* safe to delete this rpool.
|
||||
*/
|
||||
free_cg_rpool_locked(rpool);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* rdmacg_uncharge_hierarchy - hierarchically uncharge rdma resource count
|
||||
* @device: pointer to rdmacg device
|
||||
 * @stop_cg: while traversing hierarchy, when meet with stop_cg cgroup
|
||||
* stop uncharging
|
||||
* @index: index of the resource to uncharge in cg in given resource pool
|
||||
*/
|
||||
static void rdmacg_uncharge_hierarchy(struct rdma_cgroup *cg,
|
||||
struct rdmacg_device *device,
|
||||
struct rdma_cgroup *stop_cg,
|
||||
enum rdmacg_resource_type index)
|
||||
{
|
||||
struct rdma_cgroup *p;
|
||||
|
||||
mutex_lock(&rdmacg_mutex);
|
||||
|
||||
for (p = cg; p != stop_cg; p = parent_rdmacg(p))
|
||||
uncharge_cg_locked(p, device, index);
|
||||
|
||||
mutex_unlock(&rdmacg_mutex);
|
||||
|
||||
css_put(&cg->css);
|
||||
}
|
||||
|
||||
/**
|
||||
* rdmacg_uncharge - hierarchically uncharge rdma resource count
|
||||
* @device: pointer to rdmacg device
|
||||
* @index: index of the resource to uncharge in cgroup in given resource pool
|
||||
*/
|
||||
void rdmacg_uncharge(struct rdma_cgroup *cg,
|
||||
struct rdmacg_device *device,
|
||||
enum rdmacg_resource_type index)
|
||||
{
|
||||
if (index >= RDMACG_RESOURCE_MAX)
|
||||
return;
|
||||
|
||||
rdmacg_uncharge_hierarchy(cg, device, NULL, index);
|
||||
}
|
||||
EXPORT_SYMBOL(rdmacg_uncharge);
|
||||
|
||||
/**
|
||||
* rdmacg_try_charge - hierarchically try to charge the rdma resource
|
||||
* @rdmacg: pointer to rdma cgroup which will own this resource
|
||||
* @device: pointer to rdmacg device
|
||||
* @index: index of the resource to charge in cgroup (resource pool)
|
||||
*
|
||||
* This function follows charging resource in hierarchical way.
|
||||
* It will fail if the charge would cause the new value to exceed the
|
||||
* hierarchical limit.
|
||||
 * Returns 0 if the charge succeeded, otherwise -EAGAIN, -ENOMEM or -EINVAL.
|
||||
* Returns pointer to rdmacg for this resource when charging is successful.
|
||||
*
|
||||
* Charger needs to account resources on two criteria.
|
||||
* (a) per cgroup & (b) per device resource usage.
|
||||
* Per cgroup resource usage ensures that tasks of cgroup doesn't cross
|
||||
* the configured limits. Per device provides granular configuration
|
||||
* in multi device usage. It allocates resource pool in the hierarchy
|
||||
* for each parent it come across for first resource. Later on resource
|
||||
* pool will be available. Therefore it will be much faster thereon
|
||||
* to charge/uncharge.
|
||||
*/
|
||||
int rdmacg_try_charge(struct rdma_cgroup **rdmacg,
|
||||
struct rdmacg_device *device,
|
||||
enum rdmacg_resource_type index)
|
||||
{
|
||||
struct rdma_cgroup *cg, *p;
|
||||
struct rdmacg_resource_pool *rpool;
|
||||
s64 new;
|
||||
int ret = 0;
|
||||
|
||||
if (index >= RDMACG_RESOURCE_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* hold on to css, as cgroup can be removed but resource
|
||||
* accounting happens on css.
|
||||
*/
|
||||
cg = get_current_rdmacg();
|
||||
|
||||
mutex_lock(&rdmacg_mutex);
|
||||
for (p = cg; p; p = parent_rdmacg(p)) {
|
||||
rpool = get_cg_rpool_locked(p, device);
|
||||
if (IS_ERR(rpool)) {
|
||||
ret = PTR_ERR(rpool);
|
||||
goto err;
|
||||
} else {
|
||||
new = rpool->resources[index].usage + 1;
|
||||
if (new > rpool->resources[index].max) {
|
||||
ret = -EAGAIN;
|
||||
goto err;
|
||||
} else {
|
||||
rpool->resources[index].usage = new;
|
||||
rpool->usage_sum++;
|
||||
}
|
||||
}
|
||||
}
|
||||
mutex_unlock(&rdmacg_mutex);
|
||||
|
||||
*rdmacg = cg;
|
||||
return 0;
|
||||
|
||||
err:
|
||||
mutex_unlock(&rdmacg_mutex);
|
||||
rdmacg_uncharge_hierarchy(cg, device, p, index);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(rdmacg_try_charge);
|
||||
|
||||
/**
|
||||
* rdmacg_register_device - register rdmacg device to rdma controller.
|
||||
* @device: pointer to rdmacg device whose resources need to be accounted.
|
||||
*
|
||||
* If IB stack wish a device to participate in rdma cgroup resource
|
||||
* tracking, it must invoke this API to register with rdma cgroup before
|
||||
* any user space application can start using the RDMA resources.
|
||||
* Returns 0 on success or EINVAL when table length given is beyond
|
||||
* supported size.
|
||||
*/
|
||||
int rdmacg_register_device(struct rdmacg_device *device)
|
||||
{
|
||||
INIT_LIST_HEAD(&device->dev_node);
|
||||
INIT_LIST_HEAD(&device->rpools);
|
||||
|
||||
mutex_lock(&rdmacg_mutex);
|
||||
list_add_tail(&device->dev_node, &rdmacg_devices);
|
||||
mutex_unlock(&rdmacg_mutex);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(rdmacg_register_device);
|
||||
|
||||
/**
|
||||
* rdmacg_unregister_device - unregister rdmacg device from rdma controller.
|
||||
* @device: pointer to rdmacg device which was previously registered with rdma
|
||||
* controller using rdmacg_register_device().
|
||||
*
|
||||
* IB stack must invoke this after all the resources of the IB device
|
||||
* are destroyed and after ensuring that no more resources will be created
|
||||
* when this API is invoked.
|
||||
*/
|
||||
void rdmacg_unregister_device(struct rdmacg_device *device)
|
||||
{
|
||||
struct rdmacg_resource_pool *rpool, *tmp;
|
||||
|
||||
/*
|
||||
* Synchronize with any active resource settings,
|
||||
* usage query happening via configfs.
|
||||
*/
|
||||
mutex_lock(&rdmacg_mutex);
|
||||
list_del_init(&device->dev_node);
|
||||
|
||||
/*
|
||||
 * Now that this device is off the cgroup list, it's safe to free
|
||||
* all the rpool resources.
|
||||
*/
|
||||
list_for_each_entry_safe(rpool, tmp, &device->rpools, dev_node)
|
||||
free_cg_rpool_locked(rpool);
|
||||
|
||||
mutex_unlock(&rdmacg_mutex);
|
||||
}
|
||||
EXPORT_SYMBOL(rdmacg_unregister_device);
|
||||
|
||||
static int parse_resource(char *c, int *intval)
|
||||
{
|
||||
substring_t argstr;
|
||||
const char **table = &rdmacg_resource_names[0];
|
||||
char *name, *value = c;
|
||||
size_t len;
|
||||
int ret, i = 0;
|
||||
|
||||
name = strsep(&value, "=");
|
||||
if (!name || !value)
|
||||
return -EINVAL;
|
||||
|
||||
len = strlen(value);
|
||||
|
||||
for (i = 0; i < RDMACG_RESOURCE_MAX; i++) {
|
||||
if (strcmp(table[i], name))
|
||||
continue;
|
||||
|
||||
argstr.from = value;
|
||||
argstr.to = value + len;
|
||||
|
||||
ret = match_int(&argstr, intval);
|
||||
if (ret >= 0) {
|
||||
if (*intval < 0)
|
||||
break;
|
||||
return i;
|
||||
}
|
||||
if (strncmp(value, RDMACG_MAX_STR, len) == 0) {
|
||||
*intval = S32_MAX;
|
||||
return i;
|
||||
}
|
||||
break;
|
||||
}
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int rdmacg_parse_limits(char *options,
|
||||
int *new_limits, unsigned long *enables)
|
||||
{
|
||||
char *c;
|
||||
int err = -EINVAL;
|
||||
|
||||
/* parse resource options */
|
||||
while ((c = strsep(&options, " ")) != NULL) {
|
||||
int index, intval;
|
||||
|
||||
index = parse_resource(c, &intval);
|
||||
if (index < 0)
|
||||
goto err;
|
||||
|
||||
new_limits[index] = intval;
|
||||
*enables |= BIT(index);
|
||||
}
|
||||
return 0;
|
||||
|
||||
err:
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct rdmacg_device *rdmacg_get_device_locked(const char *name)
|
||||
{
|
||||
struct rdmacg_device *device;
|
||||
|
||||
lockdep_assert_held(&rdmacg_mutex);
|
||||
|
||||
list_for_each_entry(device, &rdmacg_devices, dev_node)
|
||||
if (!strcmp(name, device->name))
|
||||
return device;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static ssize_t rdmacg_resource_set_max(struct kernfs_open_file *of,
|
||||
char *buf, size_t nbytes, loff_t off)
|
||||
{
|
||||
struct rdma_cgroup *cg = css_rdmacg(of_css(of));
|
||||
const char *dev_name;
|
||||
struct rdmacg_resource_pool *rpool;
|
||||
struct rdmacg_device *device;
|
||||
char *options = strstrip(buf);
|
||||
int *new_limits;
|
||||
unsigned long enables = 0;
|
||||
int i = 0, ret = 0;
|
||||
|
||||
/* extract the device name first */
|
||||
dev_name = strsep(&options, " ");
|
||||
if (!dev_name) {
|
||||
ret = -EINVAL;
|
||||
goto err;
|
||||
}
|
||||
|
||||
new_limits = kcalloc(RDMACG_RESOURCE_MAX, sizeof(int), GFP_KERNEL);
|
||||
if (!new_limits) {
|
||||
ret = -ENOMEM;
|
||||
goto err;
|
||||
}
|
||||
|
||||
ret = rdmacg_parse_limits(options, new_limits, &enables);
|
||||
if (ret)
|
||||
goto parse_err;
|
||||
|
||||
/* acquire lock to synchronize with hot plug devices */
|
||||
mutex_lock(&rdmacg_mutex);
|
||||
|
||||
device = rdmacg_get_device_locked(dev_name);
|
||||
if (!device) {
|
||||
ret = -ENODEV;
|
||||
goto dev_err;
|
||||
}
|
||||
|
||||
rpool = get_cg_rpool_locked(cg, device);
|
||||
if (IS_ERR(rpool)) {
|
||||
ret = PTR_ERR(rpool);
|
||||
goto dev_err;
|
||||
}
|
||||
|
||||
/* now set the new limits of the rpool */
|
||||
for_each_set_bit(i, &enables, RDMACG_RESOURCE_MAX)
|
||||
set_resource_limit(rpool, i, new_limits[i]);
|
||||
|
||||
if (rpool->usage_sum == 0 &&
|
||||
rpool->num_max_cnt == RDMACG_RESOURCE_MAX) {
|
||||
/*
|
||||
* No user of the rpool and all entries are set to max, so
|
||||
* safe to delete this rpool.
|
||||
*/
|
||||
free_cg_rpool_locked(rpool);
|
||||
}
|
||||
|
||||
dev_err:
|
||||
mutex_unlock(&rdmacg_mutex);
|
||||
|
||||
parse_err:
|
||||
kfree(new_limits);
|
||||
|
||||
err:
|
||||
return ret ?: nbytes;
|
||||
}
|
||||
|
||||
static void print_rpool_values(struct seq_file *sf,
|
||||
struct rdmacg_resource_pool *rpool)
|
||||
{
|
||||
enum rdmacg_file_type sf_type;
|
||||
int i;
|
||||
u32 value;
|
||||
|
||||
sf_type = seq_cft(sf)->private;
|
||||
|
||||
for (i = 0; i < RDMACG_RESOURCE_MAX; i++) {
|
||||
seq_puts(sf, rdmacg_resource_names[i]);
|
||||
seq_putc(sf, '=');
|
||||
if (sf_type == RDMACG_RESOURCE_TYPE_MAX) {
|
||||
if (rpool)
|
||||
value = rpool->resources[i].max;
|
||||
else
|
||||
value = S32_MAX;
|
||||
} else {
|
||||
if (rpool)
|
||||
value = rpool->resources[i].usage;
|
||||
else
|
||||
value = 0;
|
||||
}
|
||||
|
||||
if (value == S32_MAX)
|
||||
seq_puts(sf, RDMACG_MAX_STR);
|
||||
else
|
||||
seq_printf(sf, "%d", value);
|
||||
seq_putc(sf, ' ');
|
||||
}
|
||||
}
|
||||
|
||||
static int rdmacg_resource_read(struct seq_file *sf, void *v)
|
||||
{
|
||||
struct rdmacg_device *device;
|
||||
struct rdmacg_resource_pool *rpool;
|
||||
struct rdma_cgroup *cg = css_rdmacg(seq_css(sf));
|
||||
|
||||
mutex_lock(&rdmacg_mutex);
|
||||
|
||||
list_for_each_entry(device, &rdmacg_devices, dev_node) {
|
||||
seq_printf(sf, "%s ", device->name);
|
||||
|
||||
rpool = find_cg_rpool_locked(cg, device);
|
||||
print_rpool_values(sf, rpool);
|
||||
|
||||
seq_putc(sf, '\n');
|
||||
}
|
||||
|
||||
mutex_unlock(&rdmacg_mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct cftype rdmacg_files[] = {
|
||||
{
|
||||
.name = "max",
|
||||
.write = rdmacg_resource_set_max,
|
||||
.seq_show = rdmacg_resource_read,
|
||||
.private = RDMACG_RESOURCE_TYPE_MAX,
|
||||
.flags = CFTYPE_NOT_ON_ROOT,
|
||||
},
|
||||
{
|
||||
.name = "current",
|
||||
.seq_show = rdmacg_resource_read,
|
||||
.private = RDMACG_RESOURCE_TYPE_STAT,
|
||||
.flags = CFTYPE_NOT_ON_ROOT,
|
||||
},
|
||||
{ } /* terminate */
|
||||
};
|
||||
|
||||
static struct cgroup_subsys_state *
|
||||
rdmacg_css_alloc(struct cgroup_subsys_state *parent)
|
||||
{
|
||||
struct rdma_cgroup *cg;
|
||||
|
||||
cg = kzalloc(sizeof(*cg), GFP_KERNEL);
|
||||
if (!cg)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
INIT_LIST_HEAD(&cg->rpools);
|
||||
return &cg->css;
|
||||
}
|
||||
|
||||
static void rdmacg_css_free(struct cgroup_subsys_state *css)
|
||||
{
|
||||
struct rdma_cgroup *cg = css_rdmacg(css);
|
||||
|
||||
kfree(cg);
|
||||
}
|
||||
|
||||
/**
|
||||
* rdmacg_css_offline - cgroup css_offline callback
|
||||
* @css: css of interest
|
||||
*
|
||||
* This function is called when @css is about to go away and responsible
|
||||
* for shooting down all rdmacg associated with @css. As part of that it
|
||||
* marks all the resource pool entries to max value, so that when resources are
|
||||
* uncharged, associated resource pool can be freed as well.
|
||||
*/
|
||||
static void rdmacg_css_offline(struct cgroup_subsys_state *css)
|
||||
{
|
||||
struct rdma_cgroup *cg = css_rdmacg(css);
|
||||
struct rdmacg_resource_pool *rpool;
|
||||
|
||||
mutex_lock(&rdmacg_mutex);
|
||||
|
||||
list_for_each_entry(rpool, &cg->rpools, cg_node)
|
||||
set_all_resource_max_limit(rpool);
|
||||
|
||||
mutex_unlock(&rdmacg_mutex);
|
||||
}
|
||||
|
||||
struct cgroup_subsys rdma_cgrp_subsys = {
|
||||
.css_alloc = rdmacg_css_alloc,
|
||||
.css_free = rdmacg_css_free,
|
||||
.css_offline = rdmacg_css_offline,
|
||||
.legacy_cftypes = rdmacg_files,
|
||||
.dfl_cftypes = rdmacg_files,
|
||||
};
|
@@ -10959,5 +10959,11 @@ struct cgroup_subsys perf_event_cgrp_subsys = {
|
||||
.css_alloc = perf_cgroup_css_alloc,
|
||||
.css_free = perf_cgroup_css_free,
|
||||
.attach = perf_cgroup_attach,
|
||||
/*
|
||||
* Implicitly enable on dfl hierarchy so that perf events can
|
||||
* always be filtered by cgroup2 path as long as perf_event
|
||||
* controller is not mounted on a legacy hierarchy.
|
||||
*/
|
||||
.implicit_on_dfl = true,
|
||||
};
|
||||
#endif /* CONFIG_CGROUP_PERF */
|
||||
|
Reference in New Issue
Block a user