Merge branch 'for-4.15' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup
Pull cgroup updates from Tejun Heo:
 "Cgroup2 cpu controller support is finally merged.

  - Basic cpu statistics support to allow monitoring by default without
    the CPU controller enabled.

  - cgroup2 cpu controller support.

  - /sys/kernel/cgroup files to help dealing with new / optional
    features"

* 'for-4.15' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup:
  cgroup: export list of cgroups v2 features using sysfs
  cgroup: export list of delegatable control files using sysfs
  cgroup: mark @cgrp __maybe_unused in cpu_stat_show()
  MAINTAINERS: relocate cpuset.c
  cgroup, sched: Move basic cpu stats from cgroup.stat to cpu.stat
  sched: Implement interface for cgroup unified hierarchy
  sched: Misc preps for cgroup unified hierarchy interface
  sched/cputime: Add dummy cputime_adjust() implementation for CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
  cgroup: statically initialize init_css_set->dfl_cgrp
  cgroup: Implement cgroup2 basic CPU usage accounting
  cpuacct: Introduce cgroup_account_cputime[_field]()
  sched/cputime: Expose cputime_adjust()
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
obj-y := cgroup.o namespace.o cgroup-v1.o
obj-y := cgroup.o stat.o namespace.o cgroup-v1.o

obj-$(CONFIG_CGROUP_FREEZER) += freezer.o
obj-$(CONFIG_CGROUP_PIDS) += pids.o
@@ -200,6 +200,15 @@ int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,

int cgroup_task_count(const struct cgroup *cgrp);

/*
 * stat.c
 */
void cgroup_stat_flush(struct cgroup *cgrp);
int cgroup_stat_init(struct cgroup *cgrp);
void cgroup_stat_exit(struct cgroup *cgrp);
void cgroup_stat_show_cputime(struct seq_file *seq);
void cgroup_stat_boot(void);

/*
 * namespace.c
 */
@@ -142,12 +142,14 @@ static struct static_key_true *cgroup_subsys_on_dfl_key[] = {
};
#undef SUBSYS

static DEFINE_PER_CPU(struct cgroup_cpu_stat, cgrp_dfl_root_cpu_stat);

/*
 * The default hierarchy, reserved for the subsystems that are otherwise
 * unattached - it never has more than a single cgroup, and all tasks are
 * part of that cgroup.
 */
struct cgroup_root cgrp_dfl_root;
struct cgroup_root cgrp_dfl_root = { .cgrp.cpu_stat = &cgrp_dfl_root_cpu_stat };
EXPORT_SYMBOL_GPL(cgrp_dfl_root);

/*
@@ -461,6 +463,28 @@ static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
	return &cgrp->self;
}

/**
 * cgroup_tryget_css - try to get a cgroup's css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest
 *
 * Find and get @cgrp's css associated with @ss.  If the css doesn't exist
 * or is offline, %NULL is returned.
 */
static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp,
						     struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	css = cgroup_css(cgrp, ss);
	if (!css || !css_tryget_online(css))
		css = NULL;
	rcu_read_unlock();

	return css;
}

/**
 * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
 * @cgrp: the cgroup of interest
@@ -647,6 +671,14 @@ struct css_set init_css_set = {
	.cgrp_links		= LIST_HEAD_INIT(init_css_set.cgrp_links),
	.mg_preload_node	= LIST_HEAD_INIT(init_css_set.mg_preload_node),
	.mg_node		= LIST_HEAD_INIT(init_css_set.mg_node),

	/*
	 * The following field is re-initialized when this cset gets linked
	 * in cgroup_init().  However, let's initialize the field
	 * statically too so that the default cgroup can be accessed safely
	 * early during boot.
	 */
	.dfl_cgrp		= &cgrp_dfl_root.cgrp,
};

static int css_set_count = 1;	/* 1 for init_css_set */
@@ -3315,6 +3347,37 @@ static int cgroup_stat_show(struct seq_file *seq, void *v)
	return 0;
}

static int __maybe_unused cgroup_extra_stat_show(struct seq_file *seq,
						 struct cgroup *cgrp, int ssid)
{
	struct cgroup_subsys *ss = cgroup_subsys[ssid];
	struct cgroup_subsys_state *css;
	int ret;

	if (!ss->css_extra_stat_show)
		return 0;

	css = cgroup_tryget_css(cgrp, ss);
	if (!css)
		return 0;

	ret = ss->css_extra_stat_show(seq, css);
	css_put(css);
	return ret;
}

static int cpu_stat_show(struct seq_file *seq, void *v)
{
	struct cgroup __maybe_unused *cgrp = seq_css(seq)->cgroup;
	int ret = 0;

	cgroup_stat_show_cputime(seq);
#ifdef CONFIG_CGROUP_SCHED
	ret = cgroup_extra_stat_show(seq, cgrp, cpu_cgrp_id);
#endif
	return ret;
}

static int cgroup_file_open(struct kernfs_open_file *of)
{
	struct cftype *cft = of->kn->priv;
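(Editor's note, not part of the diff) The new cpu.stat handler above combines the basic cputime lines from cgroup_stat_show_cputime() with an optional per-controller hook. A controller that wants to append its own lines would implement the css_extra_stat_show() callback roughly as in the sketch below; demo_extra_stat_show and demo_cgrp_subsys are hypothetical names used only for illustration:

/* Hypothetical controller-side hook -- sketch only, not part of this diff. */
static int demo_extra_stat_show(struct seq_file *seq,
				struct cgroup_subsys_state *css)
{
	/* lines emitted here appear after usage/user/system_usec in cpu.stat */
	seq_printf(seq, "demo_events %llu\n", 0ULL);
	return 0;
}

struct cgroup_subsys demo_cgrp_subsys = {
	.css_extra_stat_show	= demo_extra_stat_show,
	/* other callbacks omitted */
};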
@@ -4422,6 +4485,11 @@ static struct cftype cgroup_base_files[] = {
		.name = "cgroup.stat",
		.seq_show = cgroup_stat_show,
	},
	{
		.name = "cpu.stat",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cpu_stat_show,
	},
	{ }	/* terminate */
};

@@ -4482,6 +4550,8 @@ static void css_free_work_fn(struct work_struct *work)
			 */
			cgroup_put(cgroup_parent(cgrp));
			kernfs_put(cgrp->kn);
			if (cgroup_on_dfl(cgrp))
				cgroup_stat_exit(cgrp);
			kfree(cgrp);
		} else {
			/*
@@ -4526,6 +4596,9 @@ static void css_release_work_fn(struct work_struct *work)
		/* cgroup release path */
		trace_cgroup_release(cgrp);

		if (cgroup_on_dfl(cgrp))
			cgroup_stat_flush(cgrp);

		for (tcgrp = cgroup_parent(cgrp); tcgrp;
		     tcgrp = cgroup_parent(tcgrp))
			tcgrp->nr_dying_descendants--;
@@ -4709,6 +4782,12 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
	if (ret)
		goto out_free_cgrp;

	if (cgroup_on_dfl(parent)) {
		ret = cgroup_stat_init(cgrp);
		if (ret)
			goto out_cancel_ref;
	}

	/*
	 * Temporarily set the pointer to NULL, so idr_find() won't return
	 * a half-baked cgroup.
@@ -4716,7 +4795,7 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
	cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_KERNEL);
	if (cgrp->id < 0) {
		ret = -ENOMEM;
		goto out_cancel_ref;
		goto out_stat_exit;
	}

	init_cgroup_housekeeping(cgrp);
@@ -4767,6 +4846,9 @@ static struct cgroup *cgroup_create(struct cgroup *parent)

out_idr_free:
	cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
out_stat_exit:
	if (cgroup_on_dfl(parent))
		cgroup_stat_exit(cgrp);
out_cancel_ref:
	percpu_ref_exit(&cgrp->self.refcnt);
out_free_cgrp:
@@ -5161,6 +5243,8 @@ int __init cgroup_init(void)
	BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files));
	BUG_ON(cgroup_init_cftypes(NULL, cgroup1_base_files));

	cgroup_stat_boot();

	/*
	 * The latency of the synchronize_sched() is too high for cgroups,
	 * avoid it at the cost of forcing all readers into the slow path.
@@ -5780,3 +5864,72 @@ int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
	return ret;
}
#endif /* CONFIG_CGROUP_BPF */

#ifdef CONFIG_SYSFS
static ssize_t show_delegatable_files(struct cftype *files, char *buf,
				      ssize_t size, const char *prefix)
{
	struct cftype *cft;
	ssize_t ret = 0;

	for (cft = files; cft && cft->name[0] != '\0'; cft++) {
		if (!(cft->flags & CFTYPE_NS_DELEGATABLE))
			continue;

		if (prefix)
			ret += snprintf(buf + ret, size - ret, "%s.", prefix);

		ret += snprintf(buf + ret, size - ret, "%s\n", cft->name);

		if (unlikely(ret >= size)) {
			WARN_ON(1);
			break;
		}
	}

	return ret;
}

static ssize_t delegate_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	struct cgroup_subsys *ss;
	int ssid;
	ssize_t ret = 0;

	ret = show_delegatable_files(cgroup_base_files, buf, PAGE_SIZE - ret,
				     NULL);

	for_each_subsys(ss, ssid)
		ret += show_delegatable_files(ss->dfl_cftypes, buf + ret,
					      PAGE_SIZE - ret,
					      cgroup_subsys_name[ssid]);

	return ret;
}
static struct kobj_attribute cgroup_delegate_attr = __ATTR_RO(delegate);

static ssize_t features_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "nsdelegate\n");
}
static struct kobj_attribute cgroup_features_attr = __ATTR_RO(features);

static struct attribute *cgroup_sysfs_attrs[] = {
	&cgroup_delegate_attr.attr,
	&cgroup_features_attr.attr,
	NULL,
};

static const struct attribute_group cgroup_sysfs_attr_group = {
	.attrs = cgroup_sysfs_attrs,
	.name = "cgroup",
};

static int __init cgroup_sysfs_init(void)
{
	return sysfs_create_group(kernel_kobj, &cgroup_sysfs_attr_group);
}
subsys_initcall(cgroup_sysfs_init);
#endif /* CONFIG_SYSFS */
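(Editor's note, not part of the diff) show_delegatable_files() above only scans each cftype array for CFTYPE_NS_DELEGATABLE, so a control file opts into /sys/kernel/cgroup/delegate purely through its cftype definition. A minimal sketch, assuming a made-up "demo.max" file and handler:

/* Hypothetical cftype -- sketch only, not part of this diff. */
static int demo_max_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "max\n");
	return 0;
}

static struct cftype demo_files[] = {
	{
		.name		= "demo.max",
		.flags		= CFTYPE_NS_DELEGATABLE,	/* reported in /sys/kernel/cgroup/delegate */
		.seq_show	= demo_max_show,
	},
	{ }	/* terminate */
};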
kernel/cgroup/stat.c (new file, 334 lines)
@@ -0,0 +1,334 @@
#include "cgroup-internal.h"

#include <linux/sched/cputime.h>

static DEFINE_MUTEX(cgroup_stat_mutex);
static DEFINE_PER_CPU(raw_spinlock_t, cgroup_cpu_stat_lock);

static struct cgroup_cpu_stat *cgroup_cpu_stat(struct cgroup *cgrp, int cpu)
{
	return per_cpu_ptr(cgrp->cpu_stat, cpu);
}

/**
 * cgroup_cpu_stat_updated - keep track of updated cpu_stat
 * @cgrp: target cgroup
 * @cpu: cpu on which cpu_stat was updated
 *
 * @cgrp's cpu_stat on @cpu was updated.  Put it on the parent's matching
 * cpu_stat->updated_children list.  See the comment on top of
 * cgroup_cpu_stat definition for details.
 */
static void cgroup_cpu_stat_updated(struct cgroup *cgrp, int cpu)
{
	raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_cpu_stat_lock, cpu);
	struct cgroup *parent;
	unsigned long flags;

	/*
	 * Speculative already-on-list test.  This may race leading to
	 * temporary inaccuracies, which is fine.
	 *
	 * Because @parent's updated_children is terminated with @parent
	 * instead of NULL, we can tell whether @cgrp is on the list by
	 * testing the next pointer for NULL.
	 */
	if (cgroup_cpu_stat(cgrp, cpu)->updated_next)
		return;

	raw_spin_lock_irqsave(cpu_lock, flags);

	/* put @cgrp and all ancestors on the corresponding updated lists */
	for (parent = cgroup_parent(cgrp); parent;
	     cgrp = parent, parent = cgroup_parent(cgrp)) {
		struct cgroup_cpu_stat *cstat = cgroup_cpu_stat(cgrp, cpu);
		struct cgroup_cpu_stat *pcstat = cgroup_cpu_stat(parent, cpu);

		/*
		 * Both additions and removals are bottom-up.  If a cgroup
		 * is already in the tree, all ancestors are.
		 */
		if (cstat->updated_next)
			break;

		cstat->updated_next = pcstat->updated_children;
		pcstat->updated_children = cgrp;
	}

	raw_spin_unlock_irqrestore(cpu_lock, flags);
}

/**
 * cgroup_cpu_stat_pop_updated - iterate and dismantle cpu_stat updated tree
 * @pos: current position
 * @root: root of the tree to traverse
 * @cpu: target cpu
 *
 * Walks the updated cpu_stat tree on @cpu from @root.  %NULL @pos starts
 * the traversal and %NULL return indicates the end.  During traversal,
 * each returned cgroup is unlinked from the tree.  Must be called with the
 * matching cgroup_cpu_stat_lock held.
 *
 * The only ordering guarantee is that, for a parent and a child pair
 * covered by a given traversal, if a child is visited, its parent is
 * guaranteed to be visited afterwards.
 */
static struct cgroup *cgroup_cpu_stat_pop_updated(struct cgroup *pos,
						  struct cgroup *root, int cpu)
{
	struct cgroup_cpu_stat *cstat;
	struct cgroup *parent;

	if (pos == root)
		return NULL;

	/*
	 * We're gonna walk down to the first leaf and visit/remove it.  We
	 * can pick whatever unvisited node as the starting point.
	 */
	if (!pos)
		pos = root;
	else
		pos = cgroup_parent(pos);

	/* walk down to the first leaf */
	while (true) {
		cstat = cgroup_cpu_stat(pos, cpu);
		if (cstat->updated_children == pos)
			break;
		pos = cstat->updated_children;
	}

	/*
	 * Unlink @pos from the tree.  As the updated_children list is
	 * singly linked, we have to walk it to find the removal point.
	 * However, due to the way we traverse, @pos will be the first
	 * child in most cases. The only exception is @root.
	 */
	parent = cgroup_parent(pos);
	if (parent && cstat->updated_next) {
		struct cgroup_cpu_stat *pcstat = cgroup_cpu_stat(parent, cpu);
		struct cgroup_cpu_stat *ncstat;
		struct cgroup **nextp;

		nextp = &pcstat->updated_children;
		while (true) {
			ncstat = cgroup_cpu_stat(*nextp, cpu);
			if (*nextp == pos)
				break;

			WARN_ON_ONCE(*nextp == parent);
			nextp = &ncstat->updated_next;
		}

		*nextp = cstat->updated_next;
		cstat->updated_next = NULL;
	}

	return pos;
}

static void cgroup_stat_accumulate(struct cgroup_stat *dst_stat,
				   struct cgroup_stat *src_stat)
{
	dst_stat->cputime.utime += src_stat->cputime.utime;
	dst_stat->cputime.stime += src_stat->cputime.stime;
	dst_stat->cputime.sum_exec_runtime += src_stat->cputime.sum_exec_runtime;
}

static void cgroup_cpu_stat_flush_one(struct cgroup *cgrp, int cpu)
{
	struct cgroup *parent = cgroup_parent(cgrp);
	struct cgroup_cpu_stat *cstat = cgroup_cpu_stat(cgrp, cpu);
	struct task_cputime *last_cputime = &cstat->last_cputime;
	struct task_cputime cputime;
	struct cgroup_stat delta;
	unsigned seq;

	lockdep_assert_held(&cgroup_stat_mutex);

	/* fetch the current per-cpu values */
	do {
		seq = __u64_stats_fetch_begin(&cstat->sync);
		cputime = cstat->cputime;
	} while (__u64_stats_fetch_retry(&cstat->sync, seq));

	/* accumulate the deltas to propagate */
	delta.cputime.utime = cputime.utime - last_cputime->utime;
	delta.cputime.stime = cputime.stime - last_cputime->stime;
	delta.cputime.sum_exec_runtime = cputime.sum_exec_runtime -
					 last_cputime->sum_exec_runtime;
	*last_cputime = cputime;

	/* transfer the pending stat into delta */
	cgroup_stat_accumulate(&delta, &cgrp->pending_stat);
	memset(&cgrp->pending_stat, 0, sizeof(cgrp->pending_stat));

	/* propagate delta into the global stat and the parent's pending */
	cgroup_stat_accumulate(&cgrp->stat, &delta);
	if (parent)
		cgroup_stat_accumulate(&parent->pending_stat, &delta);
}

/* see cgroup_stat_flush() */
static void cgroup_stat_flush_locked(struct cgroup *cgrp)
{
	int cpu;

	lockdep_assert_held(&cgroup_stat_mutex);

	for_each_possible_cpu(cpu) {
		raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_cpu_stat_lock, cpu);
		struct cgroup *pos = NULL;

		raw_spin_lock_irq(cpu_lock);
		while ((pos = cgroup_cpu_stat_pop_updated(pos, cgrp, cpu)))
			cgroup_cpu_stat_flush_one(pos, cpu);
		raw_spin_unlock_irq(cpu_lock);
	}
}

/**
 * cgroup_stat_flush - flush stats in @cgrp's subtree
 * @cgrp: target cgroup
 *
 * Collect all per-cpu stats in @cgrp's subtree into the global counters
 * and propagate them upwards.  After this function returns, all cgroups in
 * the subtree have up-to-date ->stat.
 *
 * This also gets all cgroups in the subtree including @cgrp off the
 * ->updated_children lists.
 */
void cgroup_stat_flush(struct cgroup *cgrp)
{
	mutex_lock(&cgroup_stat_mutex);
	cgroup_stat_flush_locked(cgrp);
	mutex_unlock(&cgroup_stat_mutex);
}

static struct cgroup_cpu_stat *cgroup_cpu_stat_account_begin(struct cgroup *cgrp)
{
	struct cgroup_cpu_stat *cstat;

	cstat = get_cpu_ptr(cgrp->cpu_stat);
	u64_stats_update_begin(&cstat->sync);
	return cstat;
}

static void cgroup_cpu_stat_account_end(struct cgroup *cgrp,
					struct cgroup_cpu_stat *cstat)
{
	u64_stats_update_end(&cstat->sync);
	cgroup_cpu_stat_updated(cgrp, smp_processor_id());
	put_cpu_ptr(cstat);
}

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec)
{
	struct cgroup_cpu_stat *cstat;

	cstat = cgroup_cpu_stat_account_begin(cgrp);
	cstat->cputime.sum_exec_runtime += delta_exec;
	cgroup_cpu_stat_account_end(cgrp, cstat);
}

void __cgroup_account_cputime_field(struct cgroup *cgrp,
				    enum cpu_usage_stat index, u64 delta_exec)
{
	struct cgroup_cpu_stat *cstat;

	cstat = cgroup_cpu_stat_account_begin(cgrp);

	switch (index) {
	case CPUTIME_USER:
	case CPUTIME_NICE:
		cstat->cputime.utime += delta_exec;
		break;
	case CPUTIME_SYSTEM:
	case CPUTIME_IRQ:
	case CPUTIME_SOFTIRQ:
		cstat->cputime.stime += delta_exec;
		break;
	default:
		break;
	}

	cgroup_cpu_stat_account_end(cgrp, cstat);
}

void cgroup_stat_show_cputime(struct seq_file *seq)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;
	u64 usage, utime, stime;

	if (!cgroup_parent(cgrp))
		return;

	mutex_lock(&cgroup_stat_mutex);

	cgroup_stat_flush_locked(cgrp);

	usage = cgrp->stat.cputime.sum_exec_runtime;
	cputime_adjust(&cgrp->stat.cputime, &cgrp->stat.prev_cputime,
		       &utime, &stime);

	mutex_unlock(&cgroup_stat_mutex);

	do_div(usage, NSEC_PER_USEC);
	do_div(utime, NSEC_PER_USEC);
	do_div(stime, NSEC_PER_USEC);

	seq_printf(seq, "usage_usec %llu\n"
		   "user_usec %llu\n"
		   "system_usec %llu\n",
		   usage, utime, stime);
}

int cgroup_stat_init(struct cgroup *cgrp)
{
	int cpu;

	/* the root cgrp has cpu_stat preallocated */
	if (!cgrp->cpu_stat) {
		cgrp->cpu_stat = alloc_percpu(struct cgroup_cpu_stat);
		if (!cgrp->cpu_stat)
			return -ENOMEM;
	}

	/* ->updated_children list is self terminated */
	for_each_possible_cpu(cpu)
		cgroup_cpu_stat(cgrp, cpu)->updated_children = cgrp;

	prev_cputime_init(&cgrp->stat.prev_cputime);

	return 0;
}

void cgroup_stat_exit(struct cgroup *cgrp)
{
	int cpu;

	cgroup_stat_flush(cgrp);

	/* sanity check */
	for_each_possible_cpu(cpu) {
		struct cgroup_cpu_stat *cstat = cgroup_cpu_stat(cgrp, cpu);

		if (WARN_ON_ONCE(cstat->updated_children != cgrp) ||
		    WARN_ON_ONCE(cstat->updated_next))
			return;
	}

	free_percpu(cgrp->cpu_stat);
	cgrp->cpu_stat = NULL;
}

void __init cgroup_stat_boot(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		raw_spin_lock_init(per_cpu_ptr(&cgroup_cpu_stat_lock, cpu));

	BUG_ON(cgroup_stat_init(&cgrp_dfl_root.cgrp));
}
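(Editor's note, not part of the diff) The __cgroup_account_cputime[_field]() entry points in stat.c are meant to be driven from scheduler accounting paths through thin wrappers introduced by the companion header changes in this series ("cpuacct: Introduce cgroup_account_cputime[_field]()"). The sketch below shows the expected call pattern only; it is an assumption about the caller side, not a copy of the actual include/linux/cgroup.h code:

/* Caller-side wrapper -- sketch only, not the actual header implementation. */
static inline void demo_account_cputime(struct task_struct *task, u64 delta_exec)
{
	struct cgroup *cgrp;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);	/* task's cgroup on the default hierarchy */
	if (cgroup_parent(cgrp))	/* the root cgroup is not accounted */
		__cgroup_account_cputime(cgrp, delta_exec);
	rcu_read_unlock();
}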