mm: memcg/slab: use a single set of kmem_caches for all accounted allocations
This is a fairly big but mostly red patch, which makes all accounted slab allocations use a single set of kmem_caches instead of creating a separate set for each memory cgroup. Because the number of non-root kmem_caches is now capped by the number of root kmem_caches, there is no need to shrink or destroy them prematurely. They can be perfectly destroyed together with their root counterparts. This allows us to dramatically simplify the management of non-root kmem_caches and delete a ton of code. This patch performs the following changes: 1) introduces memcg_params.memcg_cache pointer to represent the kmem_cache which will be used for all non-root allocations 2) reuses the existing memcg kmem_cache creation mechanism to create memcg kmem_cache on the first allocation attempt 3) memcg kmem_caches are named <kmemcache_name>-memcg, e.g. dentry-memcg 4) simplifies memcg_kmem_get_cache() to just return memcg kmem_cache or schedule its creation and return the root cache 5) removes almost all non-root kmem_cache management code (separate refcounter, reparenting, shrinking, etc) 6) makes slab debugfs display the root_mem_cgroup css id and never show :dead and :deact flags in the memcg_slabinfo attribute. Following patches in the series will simplify the kmem_cache creation. Signed-off-by: Roman Gushchin <guro@fb.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Reviewed-by: Vlastimil Babka <vbabka@suse.cz> Reviewed-by: Shakeel Butt <shakeelb@google.com> Cc: Christoph Lameter <cl@linux.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Michal Hocko <mhocko@kernel.org> Cc: Tejun Heo <tj@kernel.org> Link: http://lkml.kernel.org/r/20200623174037.3951353-13-guro@fb.com Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:

committed by
Linus Torvalds

parent
0f876e4dc5
commit
9855609bde
16
mm/slab.c
16
mm/slab.c
@@ -1249,7 +1249,7 @@ void __init kmem_cache_init(void)
 				  nr_node_ids * sizeof(struct kmem_cache_node *),
 				  SLAB_HWCACHE_ALIGN, 0, 0);
 	list_add(&kmem_cache->list, &slab_caches);
-	memcg_link_cache(kmem_cache, NULL);
+	memcg_link_cache(kmem_cache);
 	slab_state = PARTIAL;
 
 	/*
@@ -2253,17 +2253,6 @@ int __kmem_cache_shrink(struct kmem_cache *cachep)
 	return (ret ? 1 : 0);
 }
 
-#ifdef CONFIG_MEMCG
-void __kmemcg_cache_deactivate(struct kmem_cache *cachep)
-{
-	__kmem_cache_shrink(cachep);
-}
-
-void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s)
-{
-}
-#endif
-
 int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
 	return __kmem_cache_shrink(cachep);
@@ -3872,7 +3861,8 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 		return ret;
 
 	lockdep_assert_held(&slab_mutex);
-	for_each_memcg_cache(c, cachep) {
+	c = memcg_cache(cachep);
+	if (c) {
 		/* return value determined by the root cache only */
 		__do_tune_cpucache(c, limit, batchcount, shared, gfp);
 	}
Reference in New Issue
Block a user