Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:

 - massive CPU hotplug rework (Thomas Gleixner)
 - improve migration fairness (Peter Zijlstra)
 - CPU load calculation updates/cleanups (Yuyang Du)
 - cpufreq updates (Steve Muckle)
 - nohz optimizations (Frederic Weisbecker)
 - switch_mm() micro-optimization on x86 (Andy Lutomirski)
 - ... lots of other enhancements, fixes and cleanups.

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (66 commits)
  ARM: Hide finish_arch_post_lock_switch() from modules
  sched/core: Provide a tsk_nr_cpus_allowed() helper
  sched/core: Use tsk_cpus_allowed() instead of accessing ->cpus_allowed
  sched/loadavg: Fix loadavg artifacts on fully idle and on fully loaded systems
  sched/fair: Correct unit of load_above_capacity
  sched/fair: Clean up scale confusion
  sched/nohz: Fix affine unpinned timers mess
  sched/fair: Fix fairness issue on migration
  sched/core: Kill sched_class::task_waking to clean up the migration logic
  sched/fair: Prepare to fix fairness problems on migration
  sched/fair: Move record_wakee()
  sched/core: Fix comment typo in wake_q_add()
  sched/core: Remove unused variable
  sched: Make hrtick_notifier an explicit call
  sched/fair: Make ilb_notifier an explicit call
  sched/hotplug: Make activate() the last hotplug step
  sched/hotplug: Move migration CPU_DYING to sched_cpu_dying()
  sched/migration: Move CPU_ONLINE into scheduler state
  sched/migration: Move calc_load_migrate() into CPU_DYING
  sched/migration: Move prepare transition to SCHED_STARTING state
  ...
arch/x86/mm/Makefile

@@ -2,7 +2,7 @@
 KCOV_INSTRUMENT_tlb.o := n
 
 obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
-        pat.o pgtable.o physaddr.o gup.o setup_nx.o
+        pat.o pgtable.o physaddr.o gup.o setup_nx.o tlb.o
 
 # Make sure __phys_addr has no stackprotector
 nostackp := $(call cc-option, -fno-stack-protector)
@@ -12,7 +12,6 @@ CFLAGS_setup_nx.o := $(nostackp)
 CFLAGS_fault.o := -I$(src)/../include/asm/trace
 
 obj-$(CONFIG_X86_PAT) += pat_rbtree.o
-obj-$(CONFIG_SMP) += tlb.o
 
 obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o
 
arch/x86/mm/tlb.c

@@ -28,6 +28,8 @@
  * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
  */
 
+#ifdef CONFIG_SMP
+
 struct flush_tlb_info {
         struct mm_struct *flush_mm;
         unsigned long flush_start;
@@ -57,6 +59,118 @@ void leave_mm(int cpu)
 }
 EXPORT_SYMBOL_GPL(leave_mm);
 
+#endif /* CONFIG_SMP */
+
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+               struct task_struct *tsk)
+{
+        unsigned long flags;
+
+        local_irq_save(flags);
+        switch_mm_irqs_off(prev, next, tsk);
+        local_irq_restore(flags);
+}
+
+void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+                        struct task_struct *tsk)
+{
+        unsigned cpu = smp_processor_id();
+
+        if (likely(prev != next)) {
+#ifdef CONFIG_SMP
+                this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+                this_cpu_write(cpu_tlbstate.active_mm, next);
+#endif
+                cpumask_set_cpu(cpu, mm_cpumask(next));
+
+                /*
+                 * Re-load page tables.
+                 *
+                 * This logic has an ordering constraint:
+                 *
+                 *  CPU 0: Write to a PTE for 'next'
+                 *  CPU 0: load bit 1 in mm_cpumask.  if nonzero, send IPI.
+                 *  CPU 1: set bit 1 in next's mm_cpumask
+                 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
+                 *
+                 * We need to prevent an outcome in which CPU 1 observes
+                 * the new PTE value and CPU 0 observes bit 1 clear in
+                 * mm_cpumask.  (If that occurs, then the IPI will never
+                 * be sent, and CPU 0's TLB will contain a stale entry.)
+                 *
+                 * The bad outcome can occur if either CPU's load is
+                 * reordered before that CPU's store, so both CPUs must
+                 * execute full barriers to prevent this from happening.
+                 *
+                 * Thus, switch_mm needs a full barrier between the
+                 * store to mm_cpumask and any operation that could load
+                 * from next->pgd.  TLB fills are special and can happen
+                 * due to instruction fetches or for no reason at all,
+                 * and neither LOCK nor MFENCE orders them.
+                 * Fortunately, load_cr3() is serializing and gives the
+                 * ordering guarantee we need.
+                 *
+                 */
+                load_cr3(next->pgd);
+
+                trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+
+                /* Stop flush ipis for the previous mm */
+                cpumask_clear_cpu(cpu, mm_cpumask(prev));
+
+                /* Load per-mm CR4 state */
+                load_mm_cr4(next);
+
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+                /*
+                 * Load the LDT, if the LDT is different.
+                 *
+                 * It's possible that prev->context.ldt doesn't match
+                 * the LDT register.  This can happen if leave_mm(prev)
+                 * was called and then modify_ldt changed
+                 * prev->context.ldt but suppressed an IPI to this CPU.
+                 * In this case, prev->context.ldt != NULL, because we
+                 * never set context.ldt to NULL while the mm still
+                 * exists.  That means that next->context.ldt !=
+                 * prev->context.ldt, because mms never share an LDT.
+                 */
+                if (unlikely(prev->context.ldt != next->context.ldt))
+                        load_mm_ldt(next);
+#endif
+        }
+#ifdef CONFIG_SMP
+          else {
+                this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+                BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
+
+                if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
+                        /*
+                         * On established mms, the mm_cpumask is only changed
+                         * from irq context, from ptep_clear_flush() while in
+                         * lazy tlb mode, and here. Irqs are blocked during
+                         * schedule, protecting us from simultaneous changes.
+                         */
+                        cpumask_set_cpu(cpu, mm_cpumask(next));
+
+                        /*
+                         * We were in lazy tlb mode and leave_mm disabled
+                         * tlb flush IPI delivery. We must reload CR3
+                         * to make sure to use no freed page tables.
+                         *
+                         * As above, load_cr3() is serializing and orders TLB
+                         * fills with respect to the mm_cpumask write.
+                         */
+                        load_cr3(next->pgd);
+                        trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+                        load_mm_cr4(next);
+                        load_mm_ldt(next);
+                }
+        }
+#endif
+}
+
+#ifdef CONFIG_SMP
+
 /*
  * The flush IPI assumes that a thread switch happens in this order:
  * [cpu0: the cpu that switches]
@@ -353,3 +467,5 @@ static int __init create_tlb_single_page_flush_ceiling(void)
         return 0;
 }
 late_initcall(create_tlb_single_page_flush_ceiling);
+
+#endif /* CONFIG_SMP */
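
The store-buffering hazard described in the ordering comment added to switch_mm_irqs_off() above can be illustrated with a small user-space litmus test. This is only a sketch under stated assumptions, not kernel code: the PTE and the mm_cpumask bit are modeled as plain C11 atomics, the two CPUs are pthreads, and a seq_cst fence stands in for the serializing effect of load_cr3().

/*
 * Store-buffering litmus test: a user-space model of the hazard that the
 * switch_mm_irqs_off() comment guards against.  NOT kernel code; names and
 * helpers here are invented for illustration only.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int pte;         /* models the PTE that "CPU 0" writes          */
static atomic_int cpumask_bit; /* models "CPU 1"'s bit in next's mm_cpumask   */
static int cpu0_saw_bit;       /* what CPU 0 read when deciding to send IPI   */
static int cpu1_saw_pte;       /* what CPU 1 read when (implicitly) filling   */

static void *cpu0(void *arg)
{
        (void)arg;
        /* CPU 0: write the PTE, then check the cpumask to decide on an IPI. */
        atomic_store_explicit(&pte, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* full barrier */
        cpu0_saw_bit = atomic_load_explicit(&cpumask_bit, memory_order_relaxed);
        return NULL;
}

static void *cpu1(void *arg)
{
        (void)arg;
        /* CPU 1: set its cpumask bit, then load from the PTE. */
        atomic_store_explicit(&cpumask_bit, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* stands in for load_cr3() */
        cpu1_saw_pte = atomic_load_explicit(&pte, memory_order_relaxed);
        return NULL;
}

int main(void)
{
        pthread_t t0, t1;

        pthread_create(&t0, NULL, cpu0, NULL);
        pthread_create(&t1, NULL, cpu1, NULL);
        pthread_join(t0, NULL);
        pthread_join(t1, NULL);

        /*
         * With both fences in place, "bit=0 and pte=0" is forbidden: that is
         * the outcome where no IPI is sent yet CPU 1 may keep a stale
         * translation.  Remove either fence and repeated runs on x86 can
         * produce it, because each CPU's load may complete before its own
         * store leaves the store buffer.
         */
        printf("cpu0 saw bit=%d, cpu1 saw pte=%d\n", cpu0_saw_bit, cpu1_saw_pte);
        return 0;
}

Build with something like "cc -std=c11 -pthread sb.c" (the filename is arbitrary) and run it in a loop to observe the outcomes.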