x86, fpu: split FPU state from task struct - v5
Split the FPU save area from the task struct. This allows easy migration
of FPU context, and it's generally cleaner. It also allows the following
two optimizations:

1) only allocate when the application actually uses FPU, so in the first
   lazy FPU trap. This could save memory for non-fpu using apps. Next
   patch does this lazy allocation.

2) allocate the right size for the actual cpu rather than 512 bytes
   always. Patches enabling xsave/xrstor support (coming shortly) will
   take advantage of this.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
committed by Ingo Molnar
parent fa5c463941
commit 61c4628b53
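
What the split looks like in practice: the x86 data structures are not part of the kernel/fork.c diff below, so the following is only a minimal sketch of the layout change the commit message describes; the identifiers (struct fpu_save_area, thread.xstate, xstate_size) are illustrative, not taken from this diff.

/*
 * Illustrative sketch of the split: the per-task thread state no longer
 * embeds a fixed 512-byte FPU save area, it only points at one that is
 * allocated separately and sized for the running CPU.
 */
struct fpu_save_area;			/* opaque: fsave/fxsave/xsave image */

struct thread_struct_sketch {
	/* ... registers, debug state, I/O bitmap, ... */
	struct fpu_save_area *xstate;	/* allocated separately; NULL until
					 * allocated, which a follow-on patch
					 * defers to the first FPU use */
};

extern unsigned int xstate_size;	/* bytes this CPU actually needs */

The kernel/fork.c changes below add the generic hooks that make this possible: a weak arch_task_cache_init() called from fork_init(), and a weak arch_dup_task_struct() that dup_task_struct() now uses instead of a plain structure copy.
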
kernel/fork.c
@@ -132,6 +132,10 @@ void __put_task_struct(struct task_struct *tsk)
 	free_task(tsk);
 }
 
+void __attribute__((weak)) arch_task_cache_init(void)
+{
+}
+
 void __init fork_init(unsigned long mempages)
 {
 #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
@@ -144,6 +148,9 @@ void __init fork_init(unsigned long mempages)
 			ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL);
 #endif
 
+	/* do the arch specific task caches init */
+	arch_task_cache_init();
+
 	/*
 	 * The default maximum number of threads is set to a safe
 	 * value: the thread structures can take up at most half
@@ -163,6 +170,13 @@ void __init fork_init(unsigned long mempages)
 		init_task.signal->rlim[RLIMIT_NPROC];
 }
 
+int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
+					       struct task_struct *src)
+{
+	*dst = *src;
+	return 0;
+}
+
 static struct task_struct *dup_task_struct(struct task_struct *orig)
 {
 	struct task_struct *tsk;
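
The two weak hooks added above exist so that an architecture can manage the now-external FPU save area itself. A rough sketch of such an override, in the spirit of the x86 side of this series (which is not shown in this kernel/fork.c diff); task_xstate_cachep, xstate_size and thread.xstate continue the illustrative naming from the sketch above:

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>

extern unsigned int xstate_size;	/* save-area size for this CPU */
struct kmem_cache *task_xstate_cachep;	/* slab for the external FPU area */

void arch_task_cache_init(void)
{
	/*
	 * One object per task, sized for what this CPU actually needs
	 * rather than a fixed 512 bytes; fxsave images want 16-byte
	 * alignment.
	 */
	task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
					       16, SLAB_PANIC, NULL);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	*dst = *src;		/* copies everything embedded in the struct */

	/*
	 * The FPU save area lives outside the struct now, so the child
	 * needs its own copy instead of aliasing the parent's pointer.
	 */
	dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!dst->thread.xstate)
		return -ENOMEM;
	memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
	return 0;
}

If this allocation fails, dup_task_struct() can now fail the fork cleanly, which is what the goto out error path in the next two hunks provides.
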
@@ -181,15 +195,15 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 		return NULL;
 	}
 
-	*tsk = *orig;
+	err = arch_dup_task_struct(tsk, orig);
+	if (err)
+		goto out;
+
 	tsk->stack = ti;
 
 	err = prop_local_init_single(&tsk->dirties);
-	if (err) {
-		free_thread_info(ti);
-		free_task_struct(tsk);
-		return NULL;
-	}
+	if (err)
+		goto out;
 
 	setup_thread_stack(tsk, orig);
 
@@ -205,6 +219,11 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 #endif
 	tsk->splice_pipe = NULL;
 	return tsk;
+
+out:
+	free_thread_info(ti);
+	free_task_struct(tsk);
+	return NULL;
 }
 
 #ifdef CONFIG_MMU
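
Optimization (1) from the commit message is deferred to the next patch; the idea is roughly the following. This is a sketch only, continuing the illustrative names used above, and not that patch's actual code:

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>

extern struct kmem_cache *task_xstate_cachep;	/* from the sketch above */
extern unsigned int xstate_size;

/*
 * Called from the "device not available" (#NM) trap path the first
 * time a task touches the FPU: tasks that never use the FPU never
 * pay for a save area.
 */
int fpu_alloc_on_first_use(struct task_struct *tsk)
{
	if (tsk->thread.xstate)			/* already allocated */
		return 0;

	tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!tsk->thread.xstate)
		return -ENOMEM;

	memset(tsk->thread.xstate, 0, xstate_size);
	/* ... set up the default FPU control/status words here ... */
	return 0;
}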