mm: Move mm_cachep initialization to mm_init()
commit af80602799681c78f14fbe20b6185a56020dedee upstream.

In order to allow using mm_alloc() much earlier, move initializing
mm_cachep into mm_init().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20221025201057.751153381@infradead.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:

committed by
Greg Kroah-Hartman

parent
6ee042fd24
commit
1ff14defdf
@@ -63,6 +63,7 @@ extern void sched_dead(struct task_struct *p);
|
|||||||
void __noreturn do_task_dead(void);
|
void __noreturn do_task_dead(void);
|
||||||
void __noreturn make_task_dead(int signr);
|
void __noreturn make_task_dead(int signr);
|
||||||
|
|
||||||
|
extern void mm_cache_init(void);
|
||||||
extern void proc_caches_init(void);
|
extern void proc_caches_init(void);
|
||||||
|
|
||||||
extern void fork_init(void);
|
extern void fork_init(void);
|
||||||
|
@@ -835,6 +835,7 @@ static void __init mm_init(void)
|
|||||||
init_espfix_bsp();
|
init_espfix_bsp();
|
||||||
/* Should be run after espfix64 is set up. */
|
/* Should be run after espfix64 is set up. */
|
||||||
pti_init();
|
pti_init();
|
||||||
|
mm_cache_init();
|
||||||
}
|
}
|
||||||
|
|
||||||
void __init __weak arch_call_rest_init(void)
|
void __init __weak arch_call_rest_init(void)
|
||||||
|
@@ -2818,10 +2818,27 @@ static void sighand_ctor(void *data)
|
|||||||
init_waitqueue_head(&sighand->signalfd_wqh);
|
init_waitqueue_head(&sighand->signalfd_wqh);
|
||||||
}
|
}
|
||||||
|
|
||||||
void __init proc_caches_init(void)
|
/*
 * Create the slab cache backing mm_struct allocations.  Split out of
 * proc_caches_init() so that it can be called early from mm_init(),
 * which in turn allows mm_alloc() to be used much earlier during boot.
 */
void __init mm_cache_init(void)
{
	unsigned int size;

	/*
	 * mm_struct ends with a dynamically sized mm_cpumask: its width
	 * depends on the maximum number of CPUs this system can have,
	 * taking hotplug into account (nr_cpu_ids).
	 */
	size = sizeof(struct mm_struct) + cpumask_size();

	mm_cachep = kmem_cache_create_usercopy("mm_struct", size,
			ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			offsetof(struct mm_struct, saved_auxv),
			sizeof_field(struct mm_struct, saved_auxv),
			NULL);
}
|
||||||
|
|
||||||
|
void __init proc_caches_init(void)
|
||||||
|
{
|
||||||
sighand_cachep = kmem_cache_create("sighand_cache",
|
sighand_cachep = kmem_cache_create("sighand_cache",
|
||||||
sizeof(struct sighand_struct), 0,
|
sizeof(struct sighand_struct), 0,
|
||||||
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
|
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
|
||||||
@@ -2839,19 +2856,6 @@ void __init proc_caches_init(void)
|
|||||||
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
|
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
|
||||||
NULL);
|
NULL);
|
||||||
|
|
||||||
/*
|
|
||||||
* The mm_cpumask is located at the end of mm_struct, and is
|
|
||||||
* dynamically sized based on the maximum CPU number this system
|
|
||||||
* can have, taking hotplug into account (nr_cpu_ids).
|
|
||||||
*/
|
|
||||||
mm_size = sizeof(struct mm_struct) + cpumask_size();
|
|
||||||
|
|
||||||
mm_cachep = kmem_cache_create_usercopy("mm_struct",
|
|
||||||
mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
|
|
||||||
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
|
|
||||||
offsetof(struct mm_struct, saved_auxv),
|
|
||||||
sizeof_field(struct mm_struct, saved_auxv),
|
|
||||||
NULL);
|
|
||||||
vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
|
vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
|
||||||
mmap_init();
|
mmap_init();
|
||||||
nsproxy_cache_init();
|
nsproxy_cache_init();
|
||||||
|
Reference in New Issue
Block a user