Merge branch 'akpm' (patches from Andrew Morton)
Merge first patch-bomb from Andrew Morton:
 "Quite a lot of other stuff is banked up awaiting further
  next->mainline merging, but this batch contains:

  - Lots of random misc patches
  - OCFS2
  - Most of MM
  - backlight updates
  - lib/ updates
  - printk updates
  - checkpatch updates
  - epoll tweaking
  - rtc updates
  - hfs
  - hfsplus
  - documentation
  - procfs
  - update gcov to gcc-4.7 format
  - IPC"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (269 commits)
  ipc, msg: fix message length check for negative values
  ipc/util.c: remove unnecessary work pending test
  devpts: plug the memory leak in kill_sb
  ./Makefile: export initial ramdisk compression config option
  init/Kconfig: add option to disable kernel compression
  drivers: w1: make w1_slave::flags long to avoid memory corruption
  drivers/w1/masters/ds1wm.c: use dev_get_platdata()
  drivers/memstick/core/ms_block.c: fix unreachable state in h_msb_read_page()
  drivers/memstick/core/mspro_block.c: fix attributes array allocation
  drivers/pps/clients/pps-gpio.c: remove redundant of_match_ptr
  kernel/panic.c: reduce 1 byte usage for print tainted buffer
  gcov: reuse kbasename helper
  kernel/gcov/fs.c: use pr_warn()
  kernel/module.c: use pr_foo()
  gcov: compile specific gcov implementation based on gcc version
  gcov: add support for gcc 4.7 gcov format
  gcov: move gcov structs definitions to a gcc version specific file
  kernel/taskstats.c: return -ENOMEM when alloc memory fails in add_del_listener()
  kernel/taskstats.c: add nla_nest_cancel() for failure processing between nla_nest_start() and nla_nest_end()
  kernel/sysctl_binary.c: use scnprintf() instead of snprintf()
  ...
mm/memcontrol.c

@@ -59,6 +59,7 @@
 #include <net/sock.h>
 #include <net/ip.h>
 #include <net/tcp_memcontrol.h>
+#include "slab.h"
 
 #include <asm/uaccess.h>
 
@@ -2968,7 +2969,7 @@ static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
 
 	VM_BUG_ON(p->is_root_cache);
 	cachep = p->root_cache;
-	return cachep->memcg_params->memcg_caches[memcg_cache_id(p->memcg)];
+	return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg));
 }
 
 #ifdef CONFIG_SLABINFO
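This hunk, and several below, replace open-coded memcg_caches[] dereferences with the new cache_from_memcg_idx() helper. A minimal sketch of that helper, inferred from the converted call sites; the NULL guard for caches without memcg_params is an assumption:

/* Sketch only: lookup inferred from the call sites this series
 * converts; the memcg_params NULL check is an assumption. */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	if (!s->memcg_params)
		return NULL;
	return s->memcg_params->memcg_caches[idx];
}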
@@ -2997,21 +2998,14 @@ static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
 	struct res_counter *fail_res;
 	struct mem_cgroup *_memcg;
 	int ret = 0;
-	bool may_oom;
 
 	ret = res_counter_charge(&memcg->kmem, size, &fail_res);
 	if (ret)
 		return ret;
 
-	/*
-	 * Conditions under which we can wait for the oom_killer. Those are
-	 * the same conditions tested by the core page allocator
-	 */
-	may_oom = (gfp & __GFP_FS) && !(gfp & __GFP_NORETRY);
-
 	_memcg = memcg;
 	ret = __mem_cgroup_try_charge(NULL, gfp, size >> PAGE_SHIFT,
-				      &_memcg, may_oom);
+				      &_memcg, oom_gfp_allowed(gfp));
 
 	if (ret == -EINTR) {
 		/*
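The removed may_oom logic moves behind the new oom_gfp_allowed() helper. Given the deleted expression, the helper is presumably the same test packaged in a shared header; its exact location is an assumption:

/* Presumed definition, reconstructed verbatim from the removed
 * may_oom expression above. */
static inline bool oom_gfp_allowed(gfp_t gfp_mask)
{
	/* Same conditions the core page allocator tests before OOM-killing */
	return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY);
}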
@@ -3151,7 +3145,7 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
 {
 	struct memcg_cache_params *cur_params = s->memcg_params;
 
-	VM_BUG_ON(s->memcg_params && !s->memcg_params->is_root_cache);
+	VM_BUG_ON(!is_root_cache(s));
 
 	if (num_groups > memcg_limited_groups_array_size) {
 		int i;
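The open-coded assertion collapses into is_root_cache(). For the old and new VM_BUG_ON() forms to be equivalent, the predicate would have to treat a cache with no memcg_params as a root cache, roughly:

/* Sketch inferred from the replaced assertion; treating a cache
 * without memcg_params as a root cache is the assumption that makes
 * the two VM_BUG_ON() forms equivalent. */
static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params || s->memcg_params->is_root_cache;
}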
@@ -3412,7 +3406,7 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 	idx = memcg_cache_id(memcg);
 
 	mutex_lock(&memcg_cache_mutex);
-	new_cachep = cachep->memcg_params->memcg_caches[idx];
+	new_cachep = cache_from_memcg_idx(cachep, idx);
 	if (new_cachep) {
 		css_put(&memcg->css);
 		goto out;
@@ -3458,8 +3452,8 @@ void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
 	 * we'll take the set_limit_mutex to protect ourselves against this.
 	 */
 	mutex_lock(&set_limit_mutex);
-	for (i = 0; i < memcg_limited_groups_array_size; i++) {
-		c = s->memcg_params->memcg_caches[i];
+	for_each_memcg_cache_index(i) {
+		c = cache_from_memcg_idx(s, i);
 		if (!c)
 			continue;
 
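The raw index loop becomes for_each_memcg_cache_index(). From the removed loop header, the macro presumably expands to the same bounds:

/* Presumed expansion, taken from the removed loop header; the macro
 * name is in the diff, its exact definition here is inferred. */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)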
@@ -3592,8 +3586,8 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
 	 * code updating memcg_caches will issue a write barrier to match this.
 	 */
 	read_barrier_depends();
-	if (likely(cachep->memcg_params->memcg_caches[idx])) {
-		cachep = cachep->memcg_params->memcg_caches[idx];
+	if (likely(cache_from_memcg_idx(cachep, idx))) {
+		cachep = cache_from_memcg_idx(cachep, idx);
 		goto out;
 	}
 
@@ -5389,45 +5383,50 @@ static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
 static int memcg_numa_stat_show(struct cgroup_subsys_state *css,
 				struct cftype *cft, struct seq_file *m)
 {
+	struct numa_stat {
+		const char *name;
+		unsigned int lru_mask;
+	};
+
+	static const struct numa_stat stats[] = {
+		{ "total", LRU_ALL },
+		{ "file", LRU_ALL_FILE },
+		{ "anon", LRU_ALL_ANON },
+		{ "unevictable", BIT(LRU_UNEVICTABLE) },
+	};
+	const struct numa_stat *stat;
 	int nid;
-	unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
-	unsigned long node_nr;
+	unsigned long nr;
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
-	total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL);
-	seq_printf(m, "total=%lu", total_nr);
-	for_each_node_state(nid, N_MEMORY) {
-		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL);
-		seq_printf(m, " N%d=%lu", nid, node_nr);
-	}
-	seq_putc(m, '\n');
-
-	file_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_FILE);
-	seq_printf(m, "file=%lu", file_nr);
-	for_each_node_state(nid, N_MEMORY) {
-		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
-				LRU_ALL_FILE);
-		seq_printf(m, " N%d=%lu", nid, node_nr);
-	}
-	seq_putc(m, '\n');
-
-	anon_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_ANON);
-	seq_printf(m, "anon=%lu", anon_nr);
-	for_each_node_state(nid, N_MEMORY) {
-		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
-				LRU_ALL_ANON);
-		seq_printf(m, " N%d=%lu", nid, node_nr);
-	}
-	seq_putc(m, '\n');
-
-	unevictable_nr = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
-	seq_printf(m, "unevictable=%lu", unevictable_nr);
-	for_each_node_state(nid, N_MEMORY) {
-		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
-				BIT(LRU_UNEVICTABLE));
-		seq_printf(m, " N%d=%lu", nid, node_nr);
-	}
-	seq_putc(m, '\n');
+	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
+		nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
+		seq_printf(m, "%s=%lu", stat->name, nr);
+		for_each_node_state(nid, N_MEMORY) {
+			nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
+							  stat->lru_mask);
+			seq_printf(m, " N%d=%lu", nid, nr);
+		}
+		seq_putc(m, '\n');
+	}
+
+	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
+		struct mem_cgroup *iter;
+
+		nr = 0;
+		for_each_mem_cgroup_tree(iter, memcg)
+			nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
+		seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
+		for_each_node_state(nid, N_MEMORY) {
+			nr = 0;
+			for_each_mem_cgroup_tree(iter, memcg)
+				nr += mem_cgroup_node_nr_lru_pages(
+					iter, nid, stat->lru_mask);
+			seq_printf(m, " N%d=%lu", nid, nr);
+		}
+		seq_putc(m, '\n');
+	}
+
 	return 0;
 }
 #endif /* CONFIG_NUMA */