x86: Use generic idle thread allocation
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: x86@kernel.org
Link: http://lkml.kernel.org/r/20120420124557.246929343@linutronix.de
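The patch drops x86's private per-CPU idle_thread_array cache and the do_fork_idle() workqueue round-trip, and instead boots the secondary CPU with the idle task that the generic hotplug core now allocates and hands to native_cpu_up(). Below is a minimal userspace sketch of that before/after split; it is a model, not kernel code: idle_cache, arch_boot_cpu and struct task are illustrative names, and idle_thread_get() here is merely patterned after the generic helper of the same name in kernel/smpboot.c.

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct task { int cpu; };	/* stand-in for struct task_struct */

/* Generic core: one cached "idle task" per CPU, reused across off/online. */
static struct task *idle_cache[NR_CPUS];

static struct task *idle_thread_get(int cpu)
{
	if (!idle_cache[cpu]) {
		/* first online of this CPU: allocate once, cache forever */
		idle_cache[cpu] = malloc(sizeof(*idle_cache[cpu]));
		idle_cache[cpu]->cpu = cpu;
		printf("cpu%d: allocated new idle task\n", cpu);
	} else {
		printf("cpu%d: reusing cached idle task\n", cpu);
	}
	return idle_cache[cpu];
}

/* Arch side after the patch: consumes the idle task, keeps no cache.
 * Corresponds to do_boot_cpu() setting idle->thread.sp, current_task etc. */
static int arch_boot_cpu(int cpu, struct task *idle)
{
	printf("cpu%d: booting with idle task %p\n", cpu, (void *)idle);
	return 0;
}

int main(void)
{
	/* a second online of the same CPU reuses the cached task */
	arch_boot_cpu(1, idle_thread_get(1));
	arch_boot_cpu(1, idle_thread_get(1));
	return 0;
}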
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -76,19 +76,7 @@
 /* State of each CPU */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
-/* Store all idle threads, this can be reused instead of creating
-* a new thread. Also avoids complicated thread destroy functionality
-* for idle threads.
-*/
 #ifdef CONFIG_HOTPLUG_CPU
 /*
- * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
- * removed after init for !CONFIG_HOTPLUG_CPU.
- */
-static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
-#define get_idle_for_cpu(x)	(per_cpu(idle_thread_array, x))
-#define set_idle_for_cpu(x, p)	(per_cpu(idle_thread_array, x) = (p))
-
-/*
  * We need this for trampoline_base protection from concurrent accesses when
  * off- and onlining cores wildly.
@@ -97,20 +85,16 @@ static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
 
 void cpu_hotplug_driver_lock(void)
 {
-        mutex_lock(&x86_cpu_hotplug_driver_mutex);
+	mutex_lock(&x86_cpu_hotplug_driver_mutex);
 }
 
 void cpu_hotplug_driver_unlock(void)
 {
-        mutex_unlock(&x86_cpu_hotplug_driver_mutex);
+	mutex_unlock(&x86_cpu_hotplug_driver_mutex);
 }
 
 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
 ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
-#else
-static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
-#define get_idle_for_cpu(x)	(idle_thread_array[(x)])
-#define set_idle_for_cpu(x, p)	(idle_thread_array[(x)] = (p))
 #endif
 
 /* Number of siblings per CPU package */
@@ -618,22 +602,6 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 	return (send_status | accept_status);
 }
 
-struct create_idle {
-	struct work_struct work;
-	struct task_struct *idle;
-	struct completion done;
-	int cpu;
-};
-
-static void __cpuinit do_fork_idle(struct work_struct *work)
-{
-	struct create_idle *c_idle =
-		container_of(work, struct create_idle, work);
-
-	c_idle->idle = fork_idle(c_idle->cpu);
-	complete(&c_idle->done);
-}
-
 /* reduce the number of lines printed when booting a large cpu count system */
 static void __cpuinit announce_cpu(int cpu, int apicid)
 {
@@ -660,58 +628,31 @@ static void __cpuinit announce_cpu(int cpu, int apicid)
  * Returns zero if CPU booted OK, else error code from
  * ->wakeup_secondary_cpu.
  */
-static int __cpuinit do_boot_cpu(int apicid, int cpu)
+static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 {
 	unsigned long boot_error = 0;
 	unsigned long start_ip;
 	int timeout;
-	struct create_idle c_idle = {
-		.cpu	= cpu,
-		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
-	};
-
-	INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
 
 	alternatives_smp_switch(1);
 
-	c_idle.idle = get_idle_for_cpu(cpu);
-
-	/*
-	 * We can't use kernel_thread since we must avoid to
-	 * reschedule the child.
-	 */
-	if (c_idle.idle) {
-		c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
-			(THREAD_SIZE + task_stack_page(c_idle.idle))) - 1);
-		init_idle(c_idle.idle, cpu);
-		goto do_rest;
-	}
-
-	schedule_work(&c_idle.work);
-	wait_for_completion(&c_idle.done);
-
-	if (IS_ERR(c_idle.idle)) {
-		printk("failed fork for CPU %d\n", cpu);
-		destroy_work_on_stack(&c_idle.work);
-		return PTR_ERR(c_idle.idle);
-	}
+	idle->thread.sp = (unsigned long) (((struct pt_regs *)
+			  (THREAD_SIZE + task_stack_page(idle))) - 1);
+	per_cpu(current_task, cpu) = idle;
 
-	set_idle_for_cpu(cpu, c_idle.idle);
-do_rest:
-	per_cpu(current_task, cpu) = c_idle.idle;
 #ifdef CONFIG_X86_32
 	/* Stack for startup_32 can be just as for start_secondary onwards */
 	irq_ctx_init(cpu);
 #else
-	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
+	clear_tsk_thread_flag(idle, TIF_FORK);
 	initial_gs = per_cpu_offset(cpu);
 	per_cpu(kernel_stack, cpu) =
-		(unsigned long)task_stack_page(c_idle.idle) -
+		(unsigned long)task_stack_page(idle) -
 		KERNEL_STACK_OFFSET + THREAD_SIZE;
 #endif
 	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 	initial_code = (unsigned long)start_secondary;
-	stack_start  = c_idle.idle->thread.sp;
+	stack_start  = idle->thread.sp;
 
 	/* start_ip had better be page-aligned! */
 	start_ip = trampoline_address();
@@ -813,8 +754,6 @@ do_rest:
 		 */
 		smpboot_restore_warm_reset_vector();
 	}
 
-	destroy_work_on_stack(&c_idle.work);
-
 	return boot_error;
 }
@@ -851,7 +790,7 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 
-	err = do_boot_cpu(apicid, cpu);
+	err = do_boot_cpu(apicid, cpu, tidle);
 	if (err) {
 		pr_debug("do_boot_cpu failed %d\n", err);
 		return -EIO;
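For reference, the deleted do_boot_cpu() path synchronized with a helper thread through an on-stack work item and completion: it queued do_fork_idle() and slept on the completion until fork_idle() had produced the idle task. A self-contained pthreads sketch of that handshake follows; all names are illustrative, with the mutex/condvar/flag trio standing in for struct completion and the worker thread standing in for the kernel workqueue.

#include <pthread.h>
#include <stdio.h>

struct create_idle {
	pthread_t worker;		/* stand-in for the workqueue */
	pthread_mutex_t lock;
	pthread_cond_t done_cond;
	int done;			/* stand-in for struct completion */
	int cpu;
	void *idle;			/* result: the "forked" idle task */
};

static void *do_fork_idle(void *arg)
{
	struct create_idle *c_idle = arg;

	c_idle->idle = &c_idle->cpu;	/* pretend fork_idle() succeeded */
	pthread_mutex_lock(&c_idle->lock);
	c_idle->done = 1;		/* complete(&c_idle->done) */
	pthread_cond_signal(&c_idle->done_cond);
	pthread_mutex_unlock(&c_idle->lock);
	return NULL;
}

int main(void)
{
	struct create_idle c_idle = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done_cond = PTHREAD_COND_INITIALIZER,
		.cpu = 1,
	};

	pthread_create(&c_idle.worker, NULL, do_fork_idle, &c_idle);

	/* wait_for_completion(&c_idle.done): flag check avoids lost wakeups */
	pthread_mutex_lock(&c_idle.lock);
	while (!c_idle.done)
		pthread_cond_wait(&c_idle.done_cond, &c_idle.lock);
	pthread_mutex_unlock(&c_idle.lock);

	pthread_join(c_idle.worker, NULL);
	printf("idle task for cpu%d ready: %p\n", c_idle.cpu, c_idle.idle);
	return 0;
}

The generic allocation removes this machinery entirely: the core hands the arch a ready idle task, so there is nothing to fork, wait for, or destroy on the boot path.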