Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Ingo Molnar:
 "Misc fixes all across the map:

   - /proc/kcore vsyscall related fixes
   - LTO fix
   - build warning fix
   - CPU hotplug fix
   - Kconfig NR_CPUS cleanups
   - cpu_has() cleanups/robustification
   - .gitignore fix
   - memory-failure unmapping fix
   - UV platform fix"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm, mm/hwpoison: Don't unconditionally unmap kernel 1:1 pages
  x86/error_inject: Make just_return_func() globally visible
  x86/platform/UV: Fix GAM Range Table entries less than 1GB
  x86/build: Add arch/x86/tools/insn_decoder_test to .gitignore
  x86/smpboot: Fix uncore_pci_remove() indexing bug when hot-removing a physical CPU
  x86/mm/kcore: Add vsyscall page to /proc/kcore conditionally
  vfs/proc/kcore, x86/mm/kcore: Fix SMAP fault when dumping vsyscall user page
  x86/Kconfig: Further simplify the NR_CPUS config
  x86/Kconfig: Simplify NR_CPUS config
  x86/MCE: Fix build warning introduced by "x86: do not use print_symbol()"
  x86/cpufeature: Update _static_cpu_has() to use all named variables
  x86/cpufeature: Reindent _static_cpu_has()
@@ -1176,16 +1176,25 @@ static void __init decode_gam_rng_tbl(unsigned long ptr)
 
 	uv_gre_table = gre;
 	for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
+		unsigned long size = ((unsigned long)(gre->limit - lgre)
+					<< UV_GAM_RANGE_SHFT);
+		int order = 0;
+		char suffix[] = " KMGTPE";
+
+		while (size > 9999 && order < sizeof(suffix)) {
+			size /= 1024;
+			order++;
+		}
+
 		if (!index) {
 			pr_info("UV: GAM Range Table...\n");
 			pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
 		}
-		pr_info("UV: %2d: 0x%014lx-0x%014lx %5luG %3d %04x %02x %02x\n",
+		pr_info("UV: %2d: 0x%014lx-0x%014lx %5lu%c %3d %04x %02x %02x\n",
 			index++,
 			(unsigned long)lgre << UV_GAM_RANGE_SHFT,
 			(unsigned long)gre->limit << UV_GAM_RANGE_SHFT,
-			((unsigned long)(gre->limit - lgre)) >>
-				(30 - UV_GAM_RANGE_SHFT), /* 64M -> 1G */
+			size, suffix[order],
 			gre->type, gre->nasid, gre->sockid, gre->pnode);
 
 		lgre = gre->limit;
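The lines added above scale each range's raw byte count down by powers of 1024 and pick a matching unit letter, so entries smaller than 1GB no longer print as "0G". Below is a stand-alone sketch of the same scaling idea; the helper name is hypothetical and this is ordinary user-space C, not the kernel code:

    #include <stdio.h>

    /* Hypothetical helper: scale a byte count into "9999 <unit>" form,
     * mirroring the loop added in decode_gam_rng_tbl() above. */
    static void print_scaled(unsigned long size)
    {
            static const char suffix[] = " KMGTPE";
            int order = 0;

            /* Divide by 1024 until the value fits in at most four digits. */
            while (size > 9999 && suffix[order + 1] != '\0') {
                    size /= 1024;
                    order++;
            }
            printf("%5lu%c\n", size, suffix[order]);
    }

    int main(void)
    {
            print_scaled(64UL << 20);   /* 64M range -> "   64M" */
            print_scaled(32UL << 30);   /* 32G range -> "   32G" */
            return 0;
    }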
@@ -115,4 +115,19 @@ static inline void mce_unregister_injector_chain(struct notifier_block *nb) { }
 
 extern struct mca_config mca_cfg;
 
+#ifndef CONFIG_X86_64
+/*
+ * On 32-bit systems it would be difficult to safely unmap a poison page
+ * from the kernel 1:1 map because there are no non-canonical addresses that
+ * we can use to refer to the address without risking a speculative access.
+ * However, this isn't much of an issue because:
+ * 1) Few unmappable pages are in the 1:1 map. Most are in HIGHMEM which
+ *    are only mapped into the kernel as needed
+ * 2) Few people would run a 32-bit kernel on a machine that supports
+ *    recoverable errors because they have too much memory to boot 32-bit.
+ */
+static inline void mce_unmap_kpfn(unsigned long pfn) {}
+#define mce_unmap_kpfn mce_unmap_kpfn
+#endif
+
 #endif /* __X86_MCE_INTERNAL_H__ */
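The "#define mce_unmap_kpfn mce_unmap_kpfn" line uses a common kernel pattern: the header defines a macro with the same name as the stub function so that a later #ifndef can tell whether an implementation was already supplied. A minimal sketch of that idiom with hypothetical names (the kernel hunks below apply the same pattern to mce_unmap_kpfn):

    /* Header side: a 32-bit-style build supplies a no-op stub and
     * advertises it by defining a macro with the same name. */
    #ifdef USE_STUB
    static inline void unmap_pfn(unsigned long pfn) { /* nothing to do */ }
    #define unmap_pfn unmap_pfn
    #endif

    /* .c side: only provide the real implementation when no stub
     * was advertised above. */
    #ifndef unmap_pfn
    static void unmap_pfn(unsigned long pfn)
    {
            /* real unmapping work would go here */
            (void)pfn;
    }
    #endif

    int main(void)
    {
            unmap_pfn(42UL);        /* resolves to the stub or the real version */
            return 0;
    }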
@@ -105,6 +105,10 @@ static struct irq_work mce_irq_work;
 
 static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
 
+#ifndef mce_unmap_kpfn
+static void mce_unmap_kpfn(unsigned long pfn);
+#endif
+
 /*
  * CPU/chipset specific EDAC code can register a notifier call here to print
  * MCE errors in a human-readable form.
@@ -234,7 +238,7 @@ static void __print_mce(struct mce *m)
 			m->cs, m->ip);
 
 		if (m->cs == __KERNEL_CS)
-			pr_cont("{%pS}", (void *)m->ip);
+			pr_cont("{%pS}", (void *)(unsigned long)m->ip);
 		pr_cont("\n");
 	}
 
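The extra (unsigned long) step matters on 32-bit builds, where the ip field is 64 bits wide: casting it straight to a pointer triggers a "cast to pointer from integer of different size" warning, while narrowing to the pointer-sized integer type first does not. A small illustration of the warning and the fix (hypothetical example, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t ip = 0xc1000000;       /* 64-bit value, like an mce ip field */

            /* On a 32-bit target this would warn: cast to pointer from
             * integer of different size (64-bit value, 32-bit pointer). */
            /* void *bad = (void *)ip; */

            /* Narrow to the pointer-sized integer type first, as the
             * __print_mce() change above does, and the widths match. */
            void *ok = (void *)(unsigned long)ip;

            printf("%p\n", ok);
            return 0;
    }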
@@ -590,7 +594,8 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
 
 	if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
 		pfn = mce->addr >> PAGE_SHIFT;
-		memory_failure(pfn, 0);
+		if (!memory_failure(pfn, 0))
+			mce_unmap_kpfn(pfn);
 	}
 
 	return NOTIFY_OK;
@@ -1057,12 +1062,13 @@ static int do_memory_failure(struct mce *m)
 	ret = memory_failure(m->addr >> PAGE_SHIFT, flags);
 	if (ret)
 		pr_err("Memory error not recovered");
+	else
+		mce_unmap_kpfn(m->addr >> PAGE_SHIFT);
 	return ret;
 }
 
-#if defined(arch_unmap_kpfn) && defined(CONFIG_MEMORY_FAILURE)
-
-void arch_unmap_kpfn(unsigned long pfn)
+#ifndef mce_unmap_kpfn
+static void mce_unmap_kpfn(unsigned long pfn)
 {
 	unsigned long decoy_addr;
 
@@ -1073,7 +1079,7 @@ void arch_unmap_kpfn(unsigned long pfn)
 	 * We would like to just call:
 	 *	set_memory_np((unsigned long)pfn_to_kaddr(pfn), 1);
 	 * but doing that would radically increase the odds of a
-	 * speculative access to the posion page because we'd have
+	 * speculative access to the poison page because we'd have
 	 * the virtual address of the kernel 1:1 mapping sitting
 	 * around in registers.
 	 * Instead we get tricky. We create a non-canonical address
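The comment describes the trick: rather than passing the real 1:1-map virtual address of the poisoned page to set_memory_np(), the code builds a "decoy" address that differs only in a high, unused bit, so the canonical address never sits in a register where a speculative access could dereference it. A rough user-space sketch of forming such a decoy by flipping bit 63 (illustrative only; the base-address constant and names are assumptions, not the kernel's code):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT       12
    #define FAKE_PAGE_OFFSET 0xffff888000000000ULL  /* assumed 1:1-map base */

    int main(void)
    {
            uint64_t pfn = 0x12345;

            /* Real (canonical) 1:1-map address of the page ... */
            uint64_t real_addr  = FAKE_PAGE_OFFSET + (pfn << PAGE_SHIFT);

            /* ... and a decoy with bit 63 flipped: same low bits, but
             * non-canonical, so it is never a usable pointer value. */
            uint64_t decoy_addr = real_addr ^ (1ULL << 63);

            printf("real  %#llx\ndecoy %#llx\n",
                   (unsigned long long)real_addr,
                   (unsigned long long)decoy_addr);
            return 0;
    }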
@@ -1098,7 +1104,6 @@ void arch_unmap_kpfn(unsigned long pfn)
 
 	if (set_memory_np(decoy_addr, 1))
 		pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
-
 }
 #endif
 
@@ -1430,7 +1430,6 @@ static void remove_siblinginfo(int cpu)
 	cpumask_clear(cpu_llc_shared_mask(cpu));
 	cpumask_clear(topology_sibling_cpumask(cpu));
 	cpumask_clear(topology_core_cpumask(cpu));
-	c->phys_proc_id = 0;
 	c->cpu_core_id = 0;
 	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
 	recompute_smt_state();