Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull smp hotplug cleanups from Thomas Gleixner:
 "This series is merely a cleanup of code copied around in arch/* and
  not changing any of the real cpu hotplug horrors yet.  I wish I'd had
  something more substantial for 3.5, but I underestimated the lurking
  horror..."

Fix up trivial conflicts in arch/{arm,sparc,x86}/Kconfig and
arch/sparc/include/asm/thread_info_32.h

* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (79 commits)
  um: Remove leftover declaration of alloc_task_struct_node()
  task_allocator: Use config switches instead of magic defines
  sparc: Use common threadinfo allocator
  score: Use common threadinfo allocator
  sh-use-common-threadinfo-allocator
  mn10300: Use common threadinfo allocator
  powerpc: Use common threadinfo allocator
  mips: Use common threadinfo allocator
  hexagon: Use common threadinfo allocator
  m32r: Use common threadinfo allocator
  frv: Use common threadinfo allocator
  cris: Use common threadinfo allocator
  x86: Use common threadinfo allocator
  c6x: Use common threadinfo allocator
  fork: Provide kmemcache based thread_info allocator
  tile: Use common threadinfo allocator
  fork: Provide weak arch_release_[task_struct|thread_info] functions
  fork: Move thread info gfp flags to header
  fork: Remove the weak insanity
  sh: Remove cpu_idle_wait()
  ...
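Most of the series replaces the per-architecture thread_info allocators with one shared implementation in kernel/fork.c, keyed off THREAD_SIZE_ORDER and THREADINFO_GFP (both visible in the x86 hunks below). The following is only a sketch of the shape that generic allocator takes, not the exact fork.c code (which also grows a kmem_cache based variant for architectures where THREAD_SIZE is smaller than a page):

#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/thread_info.h>

/*
 * Sketch of the common, page-based thread_info allocator.  Architectures
 * that used to open-code this now only provide THREAD_SIZE_ORDER;
 * THREADINFO_GFP is shared via <linux/thread_info.h> per the
 * "fork: Move thread info gfp flags to header" commit in the shortlog.
 */
static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
						  int node)
{
	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
					     THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
}

static void free_thread_info(struct thread_info *ti)
{
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}

Architectures that still need extra teardown hook in via the weak arch_release_task_struct()/arch_release_thread_info() callbacks from the shortlog; the x86 process.c hunk below converts its free_thread_info() into exactly such a hook.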
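The other recurring change is the removal of cpu_idle_wait() from arch code in favour of a generic kick_all_cpus_sync(); the deleted process.c lines below show the mechanism is just a memory barrier plus an empty smp_call_function() broadcast, hoisted into common code (presumably kernel/smp.c). A sketch of that helper, under those assumptions:

#include <linux/smp.h>
#include <linux/export.h>

static void do_nothing(void *unused)
{
}

/*
 * Ensure no CPU is still running code reached through a stale function
 * pointer (e.g. pm_idle): the caller updates the pointer first, then
 * this forces every CPU through a dummy IPI handler.
 */
void kick_all_cpus_sync(void)
{
	smp_mb();	/* make the pointer update visible before the kick */
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

Once every CPU has executed the empty handler, none of them can still be inside the old idle callback, which is all apm_exit() needed from cpu_idle_wait().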
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the linux kernel.
 #
 
-extra-y := head_$(BITS).o head$(BITS).o head.o init_task.o vmlinux.lds
+extra-y := head_$(BITS).o head$(BITS).o head.o vmlinux.lds
 
 CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)
 
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -2401,7 +2401,7 @@ static void __exit apm_exit(void)
 		 * (pm_idle), Wait for all processors to update cached/local
 		 * copies of pm_idle before proceeding.
 		 */
-		cpu_idle_wait();
+		kick_all_cpus_sync();
 	}
 	if (((apm_info.bios.flags & APM_BIOS_DISENGAGED) == 0)
 	    && (apm_info.connection_version > 0x0100)) {
--- a/arch/x86/kernel/init_task.c
+++ /dev/null
@@ -1,42 +0,0 @@
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/init_task.h>
-#include <linux/fs.h>
-#include <linux/mqueue.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/desc.h>
-
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-
-/*
- * Initial thread structure.
- *
- * We need to make sure that this is THREAD_SIZE aligned due to the
- * way process stacks are handled. This is done by having a special
- * "init_task" linker map entry..
- */
-union thread_union init_thread_union __init_task_data =
-	{ INIT_THREAD_INFO(init_task) };
-
-/*
- * Initial task structure.
- *
- * All other task structs will be allocated on slabs in fork.c
- */
-struct task_struct init_task = INIT_TASK(init_task);
-EXPORT_SYMBOL(init_task);
-
-/*
- * per-CPU TSS segments. Threads are completely 'soft' on Linux,
- * no more per-task TSS's. The TSS size is kept cacheline-aligned
- * so they are allowed to end up in the .data..cacheline_aligned
- * section. Since TSS's are completely CPU-local, we want them
- * on exact cacheline boundaries, to eliminate cacheline ping-pong.
- */
-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -127,8 +127,8 @@ void __cpuinit irq_ctx_init(int cpu)
 		return;
 
 	irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
-					       THREAD_FLAGS,
-					       THREAD_ORDER));
+					       THREADINFO_GFP,
+					       THREAD_SIZE_ORDER));
 	memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
 	irqctx->tinfo.cpu = cpu;
 	irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
@@ -137,8 +137,8 @@ void __cpuinit irq_ctx_init(int cpu)
 	per_cpu(hardirq_ctx, cpu) = irqctx;
 
 	irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
-					       THREAD_FLAGS,
-					       THREAD_ORDER));
+					       THREADINFO_GFP,
+					       THREAD_SIZE_ORDER));
 	memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
 	irqctx->tinfo.cpu = cpu;
 	irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -27,6 +27,15 @@
 #include <asm/debugreg.h>
 #include <asm/nmi.h>
 
+/*
+ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
+ * no more per-task TSS's. The TSS size is kept cacheline-aligned
+ * so they are allowed to end up in the .data..cacheline_aligned
+ * section. Since TSS's are completely CPU-local, we want them
+ * on exact cacheline boundaries, to eliminate cacheline ping-pong.
+ */
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
+
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU(unsigned char, is_idle);
 static ATOMIC_NOTIFIER_HEAD(idle_notifier);
@@ -67,10 +76,9 @@ void free_thread_xstate(struct task_struct *tsk)
 	fpu_free(&tsk->thread.fpu);
 }
 
-void free_thread_info(struct thread_info *ti)
+void arch_release_task_struct(struct task_struct *tsk)
 {
-	free_thread_xstate(ti->task);
-	free_pages((unsigned long)ti, THREAD_ORDER);
+	free_thread_xstate(tsk);
 }
 
 void arch_task_cache_init(void)
@@ -516,26 +524,6 @@ void stop_this_cpu(void *dummy)
 	}
 }
 
-static void do_nothing(void *unused)
-{
-}
-
-/*
- * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
- * pm_idle and update to new pm_idle value. Required while changing pm_idle
- * handler on SMP systems.
- *
- * Caller must have changed pm_idle to the new value before the call. Old
- * pm_idle value will not be used by any CPU after the return of this function.
- */
-void cpu_idle_wait(void)
-{
-	smp_mb();
-	/* kick all the CPUs so that they exit out of pm_idle */
-	smp_call_function(do_nothing, NULL, 1);
-}
-EXPORT_SYMBOL_GPL(cpu_idle_wait);
-
 /* Default MONITOR/MWAIT with no hints, used for default C1 state */
 static void mwait_idle(void)
 {
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -76,19 +76,7 @@
 /* State of each CPU */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
-/* Store all idle threads, this can be reused instead of creating
-* a new thread. Also avoids complicated thread destroy functionality
-* for idle threads.
-*/
 #ifdef CONFIG_HOTPLUG_CPU
 /*
- * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
- * removed after init for !CONFIG_HOTPLUG_CPU.
- */
-static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
-#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
-#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
-
-/*
  * We need this for trampoline_base protection from concurrent accesses when
  * off- and onlining cores wildly.
@@ -97,20 +85,16 @@ static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
 
 void cpu_hotplug_driver_lock(void)
 {
-        mutex_lock(&x86_cpu_hotplug_driver_mutex);
+	mutex_lock(&x86_cpu_hotplug_driver_mutex);
 }
 
 void cpu_hotplug_driver_unlock(void)
 {
-        mutex_unlock(&x86_cpu_hotplug_driver_mutex);
+	mutex_unlock(&x86_cpu_hotplug_driver_mutex);
 }
 
 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
 ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
-#else
-static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
-#define get_idle_for_cpu(x) (idle_thread_array[(x)])
-#define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p))
 #endif
 
 /* Number of siblings per CPU package */
@@ -618,22 +602,6 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 	return (send_status | accept_status);
 }
 
-struct create_idle {
-	struct work_struct work;
-	struct task_struct *idle;
-	struct completion done;
-	int cpu;
-};
-
-static void __cpuinit do_fork_idle(struct work_struct *work)
-{
-	struct create_idle *c_idle =
-		container_of(work, struct create_idle, work);
-
-	c_idle->idle = fork_idle(c_idle->cpu);
-	complete(&c_idle->done);
-}
-
 /* reduce the number of lines printed when booting a large cpu count system */
 static void __cpuinit announce_cpu(int cpu, int apicid)
 {
@@ -660,58 +628,31 @@ static void __cpuinit announce_cpu(int cpu, int apicid)
  * Returns zero if CPU booted OK, else error code from
  * ->wakeup_secondary_cpu.
  */
-static int __cpuinit do_boot_cpu(int apicid, int cpu)
+static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 {
 	unsigned long boot_error = 0;
 	unsigned long start_ip;
 	int timeout;
-	struct create_idle c_idle = {
-		.cpu = cpu,
-		.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
-	};
-
-	INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
 
 	alternatives_smp_switch(1);
 
-	c_idle.idle = get_idle_for_cpu(cpu);
+	idle->thread.sp = (unsigned long) (((struct pt_regs *)
+			  (THREAD_SIZE + task_stack_page(idle))) - 1);
+	per_cpu(current_task, cpu) = idle;
 
-	/*
-	 * We can't use kernel_thread since we must avoid to
-	 * reschedule the child.
-	 */
-	if (c_idle.idle) {
-		c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
-			(THREAD_SIZE + task_stack_page(c_idle.idle))) - 1);
-		init_idle(c_idle.idle, cpu);
-		goto do_rest;
-	}
-
-	schedule_work(&c_idle.work);
-	wait_for_completion(&c_idle.done);
-
-	if (IS_ERR(c_idle.idle)) {
-		printk("failed fork for CPU %d\n", cpu);
-		destroy_work_on_stack(&c_idle.work);
-		return PTR_ERR(c_idle.idle);
-	}
-
-	set_idle_for_cpu(cpu, c_idle.idle);
-do_rest:
-	per_cpu(current_task, cpu) = c_idle.idle;
 #ifdef CONFIG_X86_32
 	/* Stack for startup_32 can be just as for start_secondary onwards */
 	irq_ctx_init(cpu);
 #else
-	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
+	clear_tsk_thread_flag(idle, TIF_FORK);
 	initial_gs = per_cpu_offset(cpu);
 	per_cpu(kernel_stack, cpu) =
-		(unsigned long)task_stack_page(c_idle.idle) -
+		(unsigned long)task_stack_page(idle) -
 		KERNEL_STACK_OFFSET + THREAD_SIZE;
 #endif
 	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 	initial_code = (unsigned long)start_secondary;
-	stack_start = c_idle.idle->thread.sp;
+	stack_start = idle->thread.sp;
 
 	/* start_ip had better be page-aligned! */
 	start_ip = trampoline_address();
@@ -813,12 +754,10 @@ do_rest:
 		 */
 		smpboot_restore_warm_reset_vector();
 	}
-
-	destroy_work_on_stack(&c_idle.work);
 	return boot_error;
 }
 
-int __cpuinit native_cpu_up(unsigned int cpu)
+int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	int apicid = apic->cpu_present_to_apicid(cpu);
 	unsigned long flags;
@@ -851,7 +790,7 @@ int __cpuinit native_cpu_up(unsigned int cpu)
 
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 
-	err = do_boot_cpu(apicid, cpu);
+	err = do_boot_cpu(apicid, cpu, tidle);
 	if (err) {
 		pr_debug("do_boot_cpu failed %d\n", err);
 		return -EIO;