Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull SMP/hotplug updates from Thomas Gleixner:
 "A small set of updates for SMP and CPU hotplug:

   - Abort disabling secondary CPUs in the freezer when a wakeup is
     pending instead of evaluating it only after all CPUs have been
     offlined.

   - Remove the shared annotation for the strict per CPU cfd_data in
     the smp function call core code.

   - Remove the return values of smp_call_function() and on_each_cpu()
     as they are unconditionally 0. Fixup the few callers which
     actually bothered to check the return value"

* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  smp: Remove smp_call_function() and on_each_cpu() return values
  smp: Do not mark call_function_data as shared
  cpu/hotplug: Abort disabling secondary CPUs if wakeup is pending
  cpu/hotplug: Fix notify_cpu_starting() reference in bringup_wait_for_ap()
@@ -522,7 +522,7 @@ static int bringup_wait_for_ap(unsigned int cpu)
 	/*
 	 * SMT soft disabling on X86 requires to bring the CPU out of the
 	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
-	 * CPU marked itself as booted_once in cpu_notify_starting() so the
+	 * CPU marked itself as booted_once in notify_cpu_starting() so the
	 * cpu_smt_allowed() check will now return false if this is not the
 	 * primary sibling.
 	 */
@@ -1221,6 +1221,13 @@ int freeze_secondary_cpus(int primary)
 	for_each_online_cpu(cpu) {
 		if (cpu == primary)
 			continue;
+
+		if (pm_wakeup_pending()) {
+			pr_info("Wakeup pending. Abort CPU freeze\n");
+			error = -EBUSY;
+			break;
+		}
+
 		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
 		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
 		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
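[Illustrative note, not part of the diff: pm_wakeup_pending() only returns true if something registered a wakeup event while suspend is in progress. A minimal, hypothetical driver sketch of how such an event gets latched; example_wake_irq and its device are invented names for illustration.]

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

/*
 * Hypothetical interrupt handler of a wakeup-capable device. Reporting
 * the event here is what makes pm_wakeup_pending() return true, so the
 * freezer loop above can now bail out before offlining the remaining
 * secondary CPUs instead of noticing the wakeup only afterwards.
 */
static irqreturn_t example_wake_irq(int irq, void *dev_id)
{
	struct device *dev = dev_id;

	pm_wakeup_event(dev, 0);	/* latch a wakeup event in the PM core */

	return IRQ_HANDLED;
}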
kernel/smp.c (12 changed lines)
@@ -34,7 +34,7 @@ struct call_function_data {
 	cpumask_var_t cpumask_ipi;
 };
 
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
+static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
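[Illustrative note, not part of the diff: DEFINE_PER_CPU_SHARED_ALIGNED places a variable in the per-CPU section reserved for data that remote CPUs also write to, in order to contain false sharing; cfd_data is only ever accessed through its owning CPU's copy, so plain alignment suffices. A rough sketch of the same pattern, with invented names (example_local_stats, example_account_hit):]

#include <linux/percpu.h>

/*
 * Strictly CPU-local statistics: each CPU only updates its own copy,
 * so cacheline alignment within the regular per-CPU section is enough
 * and the ..shared_aligned section is left to genuinely cross-CPU data.
 */
struct example_local_stats {
	unsigned long hits;
};

static DEFINE_PER_CPU_ALIGNED(struct example_local_stats, example_stats);

static void example_account_hit(void)
{
	this_cpu_inc(example_stats.hits);	/* preemption-safe local update */
}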
@@ -487,13 +487,11 @@ EXPORT_SYMBOL(smp_call_function_many);
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int smp_call_function(smp_call_func_t func, void *info, int wait)
+void smp_call_function(smp_call_func_t func, void *info, int wait)
 {
 	preempt_disable();
 	smp_call_function_many(cpu_online_mask, func, info, wait);
 	preempt_enable();
-
-	return 0;
 }
 EXPORT_SYMBOL(smp_call_function);
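[Illustrative note, not part of the diff: the "fixup the few callers" mentioned in the pull request amounts to dropping dead return-value checks. A hedged before/after sketch of such a caller; example_flush and example_flush_all are hypothetical and not among the callers actually touched by this series.]

#include <linux/smp.h>

static void example_flush(void *info)
{
	/* runs on every other online CPU, from IPI context */
}

static void example_flush_all(void)
{
	/*
	 * Previously a caller could write
	 *	if (smp_call_function(example_flush, NULL, 1))
	 *		pr_warn("flush failed\n");
	 * but the function could only ever return 0, so the error path
	 * was dead code. With the void prototype it is simply invoked
	 * for its side effect.
	 */
	smp_call_function(example_flush, NULL, 1);
}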
@@ -594,18 +592,16 @@ void __init smp_init(void)
  * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
  * of local_irq_disable/enable().
  */
-int on_each_cpu(void (*func) (void *info), void *info, int wait)
+void on_each_cpu(void (*func) (void *info), void *info, int wait)
 {
 	unsigned long flags;
-	int ret = 0;
 
 	preempt_disable();
-	ret = smp_call_function(func, info, wait);
+	smp_call_function(func, info, wait);
 	local_irq_save(flags);
 	func(info);
 	local_irq_restore(flags);
 	preempt_enable();
-	return ret;
 }
 EXPORT_SYMBOL(on_each_cpu);
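[Illustrative note, not part of the diff: unlike smp_call_function(), on_each_cpu() also runs the callback on the calling CPU, with local interrupts disabled around that invocation, so the callback must be IRQ-safe. A small hypothetical usage sketch under the new void prototype; example_counter, example_reset and example_reset_all are invented names.]

#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(unsigned long, example_counter);

static void example_reset(void *info)
{
	/* IRQ-safe: runs locally with interrupts off and remotely from IPI */
	this_cpu_write(example_counter, 0);
}

static void example_reset_all(void)
{
	/* wait=1: do not return until every CPU has executed example_reset() */
	on_each_cpu(example_reset, NULL, 1);
}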
@@ -35,14 +35,13 @@ int smp_call_function_single_async(int cpu, call_single_data_t *csd)
 }
 EXPORT_SYMBOL(smp_call_function_single_async);
 
-int on_each_cpu(smp_call_func_t func, void *info, int wait)
+void on_each_cpu(smp_call_func_t func, void *info, int wait)
 {
 	unsigned long flags;
 
 	local_irq_save(flags);
 	func(info);
 	local_irq_restore(flags);
-	return 0;
 }
 EXPORT_SYMBOL(on_each_cpu);