Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
kernel/cpu.c
@@ -607,15 +607,15 @@ static void cpuhp_thread_fun(unsigned int cpu)
 	bool bringup = st->bringup;
 	enum cpuhp_state state;
 
+	if (WARN_ON_ONCE(!st->should_run))
+		return;
+
 	/*
 	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
 	 * that if we see ->should_run we also see the rest of the state.
 	 */
 	smp_mb();
 
-	if (WARN_ON_ONCE(!st->should_run))
-		return;
-
 	cpuhp_lock_acquire(bringup);
 
 	if (st->single) {
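
Note: this hunk moves the !st->should_run check ahead of the smp_mb(). The ordering contract it preserves is the usual publish/consume pairing: the waker writes the hotplug request state and only then sets ->should_run, so the hotplug thread must order its ->should_run load before its reads of the rest of the state. The kernel does this with a plain load in cpuhp_should_run() upgraded by smp_mb(); an acquire load expresses the same ordering. A minimal user-space sketch of the pairing with C11 atomics; the names are illustrative, not kernel code:

    /* Sketch of the publish/consume ordering cpuhp_thread_fun() relies on.
     * Build with: gcc sketch.c -lpthread */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct hp_state {
    	int payload;            /* stands in for "the rest of the state" */
    	atomic_bool should_run; /* publication flag */
    };

    static struct hp_state st;

    static void *thread_fun(void *arg)
    {
    	/* Acquire: pairs with the release store in main(). If we see
    	 * should_run == true, we are guaranteed to see payload == 42. */
    	while (!atomic_load_explicit(&st.should_run, memory_order_acquire))
    		;
    	printf("payload = %d\n", st.payload);
    	return NULL;
    }

    int main(void)
    {
    	pthread_t t;
    	pthread_create(&t, NULL, thread_fun, NULL);
    	st.payload = 42; /* write the state first ... */
    	atomic_store_explicit(&st.should_run, 1, memory_order_release);
    	pthread_join(t, NULL);
    	return 0;
    }
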
@@ -916,7 +916,8 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
 		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
 		if (ret) {
 			st->target = prev_state;
-			undo_cpu_down(cpu, st);
+			if (st->state < prev_state)
+				undo_cpu_down(cpu, st);
 			break;
 		}
 	}
@@ -969,7 +970,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
 	 * to do the further cleanups.
 	 */
 	ret = cpuhp_down_callbacks(cpu, st, target);
-	if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
+	if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
 		cpuhp_reset_state(st, prev_state);
 		__cpuhp_kick_ap(st);
 	}
kernel/fork.c

@@ -550,8 +550,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 		goto out;
 	}
 	/* a new mm has just been created */
-	arch_dup_mmap(oldmm, mm);
-	retval = 0;
+	retval = arch_dup_mmap(oldmm, mm);
 out:
 	up_write(&mm->mmap_sem);
 	flush_tlb_mm(oldmm);
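
Note: this hunk is the caller side of making arch_dup_mmap() fallible. Instead of discarding the hook's outcome and forcing retval = 0, dup_mmap() now forwards whatever the architecture hook returns through its existing out: unwind path. A hedged sketch of that pattern in plain user-space C, with hypothetical names:

    /* A hook that used to return void becomes fallible; the caller
     * forwards its error code through the common unwind path. */
    #include <errno.h>
    #include <stdio.h>

    static int arch_hook_ok(void)  { return 0; }        /* success */
    static int arch_hook_bad(void) { return -ENOMEM; }  /* failure */

    static int dup_something(int (*arch_hook)(void))
    {
    	int retval = arch_hook(); /* was: arch_hook(); retval = 0; */
    	/* the shared cleanup runs either way, as after dup_mmap()'s
    	 * out: label */
    	return retval;
    }

    int main(void)
    {
    	printf("ok:  %d\n", dup_something(arch_hook_ok));
    	printf("bad: %d\n", dup_something(arch_hook_bad));
    	return 0;
    }
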
kernel/printk/printk_safe.c

@@ -306,12 +306,12 @@ static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
 	return printk_safe_log_store(s, fmt, args);
 }
 
-void printk_nmi_enter(void)
+void notrace printk_nmi_enter(void)
 {
 	this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
 }
 
-void printk_nmi_exit(void)
+void notrace printk_nmi_exit(void)
 {
 	this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
 }
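
Note: notrace expands to __attribute__((no_instrument_function)) in the kernel and keeps the function tracer out of printk_nmi_enter()/printk_nmi_exit(), which flip the per-CPU printk context flag that tracing-time printk handling itself depends on. The same attribute exists in user space with gcc's -finstrument-functions, where the profiling hooks must themselves be exempt or they would recurse. An illustrative sketch (build with gcc -finstrument-functions demo.c):

    #include <stdio.h>

    #define notrace __attribute__((no_instrument_function))

    /* gcc inserts calls to these two hooks around every instrumented
     * function; they must themselves be notrace, or each hook call
     * would be instrumented and recurse forever. */
    void notrace __cyg_profile_func_enter(void *fn, void *callsite)
    {
    	fprintf(stderr, "enter %p\n", fn);
    }

    void notrace __cyg_profile_func_exit(void *fn, void *callsite)
    {
    	fprintf(stderr, "exit  %p\n", fn);
    }

    static __attribute__((noinline)) void traced_work(void)
    {
    	puts("work");
    }

    int main(void)
    {
    	traced_work(); /* hooks fire around main() and traced_work() */
    	return 0;
    }
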
kernel/time/clocksource.c

@@ -133,19 +133,40 @@ static void inline clocksource_watchdog_unlock(unsigned long *flags)
 	spin_unlock_irqrestore(&watchdog_lock, *flags);
 }
 
+static int clocksource_watchdog_kthread(void *data);
+static void __clocksource_change_rating(struct clocksource *cs, int rating);
+
 /*
  * Interval: 0.5sec Threshold: 0.0625s
  */
 #define WATCHDOG_INTERVAL (HZ >> 1)
 #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
 
+static void clocksource_watchdog_work(struct work_struct *work)
+{
+	/*
+	 * We cannot directly run clocksource_watchdog_kthread() here, because
+	 * clocksource_select() calls timekeeping_notify() which uses
+	 * stop_machine(). One cannot use stop_machine() from a workqueue() due
+	 * lock inversions wrt CPU hotplug.
+	 *
+	 * Also, we only ever run this work once or twice during the lifetime
+	 * of the kernel, so there is no point in creating a more permanent
+	 * kthread for this.
+	 *
+	 * If kthread_run fails the next watchdog scan over the
+	 * watchdog_list will find the unstable clock again.
+	 */
+	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
+}
+
 static void __clocksource_unstable(struct clocksource *cs)
 {
 	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
 	cs->flags |= CLOCK_SOURCE_UNSTABLE;
 
 	/*
-	 * If the clocksource is registered clocksource_watchdog_work() will
+	 * If the clocksource is registered clocksource_watchdog_kthread() will
 	 * re-rate and re-select.
 	 */
 	if (list_empty(&cs->list)) {
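
Note: the restored comment carries the whole rationale for this revert: clocksource_select() ends up in stop_machine(), which must not be called from a workqueue because of lock inversions against CPU hotplug, so the work item only launches a short-lived kthread and the kthread does the real work. The shape of that hand-off as a user-space sketch with pthreads; the names are illustrative only:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_t watchdog;

    static void *watchdog_kthread(void *data)
    {
    	/* free to block and take mutexes: runs outside the
    	 * restricted callback context */
    	puts("re-rating unstable clocksource");
    	return NULL;
    }

    /* Stands in for the workqueue callback: it must not do the heavy
     * work itself, so it only hands off to a one-shot thread. */
    static int watchdog_work(void)
    {
    	return pthread_create(&watchdog, NULL, watchdog_kthread, NULL);
    }

    int main(void)
    {
    	if (watchdog_work() == 0)
    		pthread_join(watchdog, NULL);
    	return 0;
    }

As the kernel comment notes for kthread_run(), a failed spawn is tolerable here because the next watchdog scan finds the unstable clocksource again.
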
@@ -156,7 +177,7 @@ static void __clocksource_unstable(struct clocksource *cs)
 	if (cs->mark_unstable)
 		cs->mark_unstable(cs);
 
-	/* kick clocksource_watchdog_work() */
+	/* kick clocksource_watchdog_kthread() */
 	if (finished_booting)
 		schedule_work(&watchdog_work);
 }
@@ -166,7 +187,7 @@ static void __clocksource_unstable(struct clocksource *cs)
  * @cs: clocksource to be marked unstable
  *
  * This function is called by the x86 TSC code to mark clocksources as unstable;
- * it defers demotion and re-selection to a work.
+ * it defers demotion and re-selection to a kthread.
  */
 void clocksource_mark_unstable(struct clocksource *cs)
 {
@@ -391,9 +412,7 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
 	}
 }
 
-static void __clocksource_change_rating(struct clocksource *cs, int rating);
-
-static int __clocksource_watchdog_work(void)
+static int __clocksource_watchdog_kthread(void)
 {
 	struct clocksource *cs, *tmp;
 	unsigned long flags;
@@ -418,12 +437,13 @@ static int __clocksource_watchdog_work(void)
 	return select;
 }
 
-static void clocksource_watchdog_work(struct work_struct *work)
+static int clocksource_watchdog_kthread(void *data)
 {
 	mutex_lock(&clocksource_mutex);
-	if (__clocksource_watchdog_work())
+	if (__clocksource_watchdog_kthread())
 		clocksource_select();
 	mutex_unlock(&clocksource_mutex);
+	return 0;
 }
 
 static bool clocksource_is_watchdog(struct clocksource *cs)
@@ -442,7 +462,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
 static void clocksource_select_watchdog(bool fallback) { }
 static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
 static inline void clocksource_resume_watchdog(void) { }
-static inline int __clocksource_watchdog_work(void) { return 0; }
+static inline int __clocksource_watchdog_kthread(void) { return 0; }
 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
 void clocksource_mark_unstable(struct clocksource *cs) { }
 
@@ -810,7 +830,7 @@ static int __init clocksource_done_booting(void)
 	/*
 	 * Run the watchdog first to eliminate unstable clock sources
 	 */
-	__clocksource_watchdog_work();
+	__clocksource_watchdog_kthread();
 	clocksource_select();
 	mutex_unlock(&clocksource_mutex);
 	return 0;