Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
 "Misc fixes:

   - unwinder fixes
   - AMD CPU topology enumeration fixes
   - microcode loader fixes
   - x86 embedded platform fixes
   - fix for a bootup crash that may trigger when clearcpuid= is used
     with invalid values"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mpx: Use compatible types in comparison to fix sparse error
  x86/tsc: Add the Intel Denverton Processor to native_calibrate_tsc()
  x86/entry: Fix the end of the stack for newly forked tasks
  x86/unwind: Include __schedule() in stack traces
  x86/unwind: Disable KASAN checks for non-current tasks
  x86/unwind: Silence warnings for non-current tasks
  x86/microcode/intel: Use correct buffer size for saving microcode data
  x86/microcode/intel: Fix allocation size of struct ucode_patch
  x86/microcode/intel: Add a helper which gives the microcode revision
  x86/microcode: Use native CPUID to tickle out microcode revision
  x86/CPU: Add native CPUID variants returning a single datum
  x86/boot: Add missing declaration of string functions
  x86/CPU/AMD: Fix Bulldozer topology
  x86/platform/intel-mid: Rename 'spidev' to 'mrfld_spidev'
  x86/cpu: Fix typo in the comment for Anniedale
  x86/cpu: Fix bootup crashes by sanitizing the argument of the 'clearcpuid=' command-line option
Linus Torvalds committed on 2017-01-15 12:03:11 -08:00
18 files changed, 129 insertions(+), 100 deletions(-)

arch/x86/kernel/cpu/amd.c

@@ -309,15 +309,8 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
     /* get information required for multi-node processors */
     if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
-        u32 eax, ebx, ecx, edx;
-        cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
-        node_id = ecx & 7;
-        /* get compute unit information */
-        smp_num_siblings = ((ebx >> 8) & 3) + 1;
-        c->x86_max_cores /= smp_num_siblings;
-        c->cpu_core_id = ebx & 0xff;
+        node_id = cpuid_ecx(0x8000001e) & 7;
         /*
          * We may have multiple LLCs if L3 caches exist, so check if we
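
For reference, the single-datum CPUID wrapper used in the new line above (and mirrored by the native_* variants added in "x86/CPU: Add native CPUID variants returning a single datum") amounts to the following sketch; this illustrates the pattern rather than quoting the exact header contents:

/* Sketch of a single-datum CPUID wrapper, modeled on <asm/processor.h> */
static inline unsigned int cpuid_ecx(unsigned int op)
{
    unsigned int eax, ebx, ecx, edx;

    cpuid(op, &eax, &ebx, &ecx, &edx);   /* full CPUID, all four registers */

    return ecx;                          /* caller only wants ECX */
}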

arch/x86/kernel/cpu/common.c

@@ -1221,7 +1221,7 @@ static __init int setup_disablecpuid(char *arg)
 {
     int bit;
-    if (get_option(&arg, &bit) && bit < NCAPINTS*32)
+    if (get_option(&arg, &bit) && bit >= 0 && bit < NCAPINTS * 32)
         setup_clear_cpu_cap(bit);
     else
         return 0;
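
This one-line change is the fix for the 'clearcpuid=' bootup crash: get_option() happily parses negative numbers, and using one as a capability-bit index means writing outside the capability bitmaps. A hypothetical user-space illustration of the failure mode and the added lower-bound check (all names and the NCAPINTS value here are invented for the example, not kernel code):

#include <stdio.h>

#define NCAPINTS 18                          /* illustrative word count only */

static unsigned int caps_cleared[NCAPINTS];  /* stand-in for the kernel's capability bitmap */

/* stand-in for setup_clear_cpu_cap(): mark a feature bit as force-cleared */
static void clear_cap(int bit)
{
    caps_cleared[bit / 32] |= 1u << (bit % 32);  /* a negative bit is an out-of-bounds/undefined access */
}

int main(void)
{
    int bit = -1;                            /* what an invalid "clearcpuid=" argument parses to */

    if (bit >= 0 && bit < NCAPINTS * 32)     /* the added lower-bound check rejects it */
        clear_cap(bit);
    else
        printf("rejected invalid bit %d\n", bit);

    return 0;
}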

arch/x86/kernel/cpu/intel.c

@@ -14,6 +14,7 @@
 #include <asm/bugs.h>
 #include <asm/cpu.h>
 #include <asm/intel-family.h>
+#include <asm/microcode_intel.h>
 #ifdef CONFIG_X86_64
 #include <linux/topology.h>
@@ -78,14 +79,8 @@ static void early_init_intel(struct cpuinfo_x86 *c)
     (c->x86 == 0x6 && c->x86_model >= 0x0e))
         set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
-    if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
-        unsigned lower_word;
-        wrmsr(MSR_IA32_UCODE_REV, 0, 0);
-        /* Required by the SDM */
-        sync_core();
-        rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
-    }
+    if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
+        c->microcode = intel_get_microcode_revision();
     /*
      * Atom erratum AAE44/AAF40/AAG38/AAH41:
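
The new intel_get_microcode_revision() helper (from "x86/microcode/intel: Add a helper which gives the microcode revision") centralizes the MSR/CPUID dance that this file and the microcode loader used to open-code. A minimal sketch, reconstructed from the open-coded sequences removed elsewhere in this merge; the exact header contents may differ:

/* Sketch; assumes <asm/msr.h> and the native_cpuid_eax() helper added by this series. */
static inline u32 intel_get_microcode_revision(void)
{
    u32 rev, dummy;

    native_wrmsrl(MSR_IA32_UCODE_REV, 0);

    /* As documented in the SDM: CPUID(1) deposits the update ID in MSR 0x8B */
    native_cpuid_eax(1);

    /* the revision lives in the high half of IA32_BIOS_SIGN_ID */
    native_rdmsr(MSR_IA32_UCODE_REV, dummy, rev);

    return rev;
}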

arch/x86/kernel/cpu/microcode/intel.c

@@ -150,7 +150,7 @@ static struct ucode_patch *__alloc_microcode_buf(void *data, unsigned int size)
 {
     struct ucode_patch *p;
-    p = kzalloc(size, GFP_KERNEL);
+    p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
     if (!p)
         return ERR_PTR(-ENOMEM);
@@ -368,26 +368,6 @@ next:
     return patch;
 }
-static void cpuid_1(void)
-{
-    /*
-     * According to the Intel SDM, Volume 3, 9.11.7:
-     *
-     * CPUID returns a value in a model specific register in
-     * addition to its usual register return values. The
-     * semantics of CPUID cause it to deposit an update ID value
-     * in the 64-bit model-specific register at address 08BH
-     * (IA32_BIOS_SIGN_ID). If no update is present in the
-     * processor, the value in the MSR remains unmodified.
-     *
-     * Use native_cpuid -- this code runs very early and we don't
-     * want to mess with paravirt.
-     */
-    unsigned int eax = 1, ebx, ecx = 0, edx;
-    native_cpuid(&eax, &ebx, &ecx, &edx);
-}
 static int collect_cpu_info_early(struct ucode_cpu_info *uci)
 {
     unsigned int val[2];
@@ -410,15 +390,8 @@ static int collect_cpu_info_early(struct ucode_cpu_info *uci)
         native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
         csig.pf = 1 << ((val[1] >> 18) & 7);
     }
-    native_wrmsrl(MSR_IA32_UCODE_REV, 0);
-    /* As documented in the SDM: Do a CPUID 1 here */
-    cpuid_1();
-    /* get the current revision from MSR 0x8B */
-    native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
-    csig.rev = val[1];
+    csig.rev = intel_get_microcode_revision();
     uci->cpu_sig = csig;
     uci->valid = 1;
@@ -602,7 +575,7 @@ static inline void print_ucode(struct ucode_cpu_info *uci)
 static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
 {
     struct microcode_intel *mc;
-    unsigned int val[2];
+    u32 rev;
     mc = uci->mc;
     if (!mc)
@@ -610,21 +583,16 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
     /* write microcode via MSR 0x79 */
     native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
-    native_wrmsrl(MSR_IA32_UCODE_REV, 0);
-    /* As documented in the SDM: Do a CPUID 1 here */
-    cpuid_1();
-    /* get the current revision from MSR 0x8B */
-    native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
-    if (val[1] != mc->hdr.rev)
+    rev = intel_get_microcode_revision();
+    if (rev != mc->hdr.rev)
         return -1;
 #ifdef CONFIG_X86_64
     /* Flush global tlb. This is precaution. */
     flush_tlb_early();
 #endif
-    uci->cpu_sig.rev = val[1];
+    uci->cpu_sig.rev = rev;
     if (early)
         print_ucode(uci);
@@ -804,8 +772,8 @@ static int apply_microcode_intel(int cpu)
     struct microcode_intel *mc;
     struct ucode_cpu_info *uci;
     struct cpuinfo_x86 *c;
-    unsigned int val[2];
     static int prev_rev;
+    u32 rev;
     /* We should bind the task to the CPU */
     if (WARN_ON(raw_smp_processor_id() != cpu))
@@ -822,33 +790,28 @@ static int apply_microcode_intel(int cpu)
     /* write microcode via MSR 0x79 */
     wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
-    wrmsrl(MSR_IA32_UCODE_REV, 0);
-    /* As documented in the SDM: Do a CPUID 1 here */
-    cpuid_1();
-    /* get the current revision from MSR 0x8B */
-    rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
-    if (val[1] != mc->hdr.rev) {
+    rev = intel_get_microcode_revision();
+    if (rev != mc->hdr.rev) {
         pr_err("CPU%d update to revision 0x%x failed\n",
                cpu, mc->hdr.rev);
         return -1;
     }
-    if (val[1] != prev_rev) {
+    if (rev != prev_rev) {
         pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
-            val[1],
+            rev,
             mc->hdr.date & 0xffff,
             mc->hdr.date >> 24,
             (mc->hdr.date >> 16) & 0xff);
-        prev_rev = val[1];
+        prev_rev = rev;
     }
     c = &cpu_data(cpu);
-    uci->cpu_sig.rev = val[1];
-    c->microcode = val[1];
+    uci->cpu_sig.rev = rev;
+    c->microcode = rev;
     return 0;
 }
@@ -860,7 +823,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
     u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
     int new_rev = uci->cpu_sig.rev;
     unsigned int leftover = size;
-    unsigned int curr_mc_size = 0;
+    unsigned int curr_mc_size = 0, new_mc_size = 0;
     unsigned int csig, cpf;
     while (leftover) {
@@ -901,6 +864,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
             vfree(new_mc);
             new_rev = mc_header.rev;
             new_mc = mc;
+            new_mc_size = mc_size;
             mc = NULL;    /* trigger new vmalloc */
         }
@@ -926,7 +890,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
      * permanent memory. So it will be loaded early when a CPU is hot added
      * or resumes.
      */
-    save_mc_for_early(new_mc, curr_mc_size);
+    save_mc_for_early(new_mc, new_mc_size);
     pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
              cpu, new_rev, uci->cpu_sig.rev);
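
The kzalloc() fix earlier in this file only makes sense together with how the rest of __alloc_microcode_buf() (not shown in the hunk) handles the payload: the struct is a small container and the microcode blob gets its own copy. A sketch of that shape; the kmemdup() half is an assumption about the surrounding code, not quoted from it:

/* Sketch of the intended shape; the payload-copy part is assumed, not shown in the hunk. */
static struct ucode_patch *__alloc_microcode_buf(void *data, unsigned int size)
{
    struct ucode_patch *p;

    p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);   /* allocate the container only */
    if (!p)
        return ERR_PTR(-ENOMEM);

    p->data = kmemdup(data, size, GFP_KERNEL);             /* duplicate the blob separately */
    if (!p->data) {
        kfree(p);
        return ERR_PTR(-ENOMEM);
    }

    return p;
}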

arch/x86/kernel/tsc.c

@@ -694,6 +694,7 @@ unsigned long native_calibrate_tsc(void)
         crystal_khz = 24000;    /* 24.0 MHz */
         break;
     case INTEL_FAM6_SKYLAKE_X:
+    case INTEL_FAM6_ATOM_DENVERTON:
         crystal_khz = 25000;    /* 25.0 MHz */
         break;
     case INTEL_FAM6_ATOM_GOLDMONT:
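
For context on why Denverton needs an entry: native_calibrate_tsc() derives the TSC frequency from the CPUID leaf 0x15 ratio and the crystal clock, and on parts that report a zero crystal frequency in ECX it falls back to the per-model table being extended above. A simplified sketch of that calculation (the model switch is collapsed to a single value here for brevity; this is not the exact kernel code):

/* Simplified sketch of the calculation around this hunk. */
static unsigned long calibrate_tsc_sketch(void)
{
    unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
    unsigned int crystal_khz;

    cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);

    if (ebx_numerator == 0 || eax_denominator == 0)
        return 0;                        /* TSC/crystal ratio not enumerated */

    crystal_khz = ecx_hz / 1000;         /* many parts report 0 here... */
    if (crystal_khz == 0)
        crystal_khz = 25000;             /* ...hence the per-model table this patch extends */

    return crystal_khz * ebx_numerator / eax_denominator;   /* TSC frequency in kHz */
}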

arch/x86/kernel/unwind_frame.c

@@ -6,6 +6,21 @@
 #define FRAME_HEADER_SIZE (sizeof(long) * 2)
+/*
+ * This disables KASAN checking when reading a value from another task's stack,
+ * since the other task could be running on another CPU and could have poisoned
+ * the stack in the meantime.
+ */
+#define READ_ONCE_TASK_STACK(task, x)       \
+({                                          \
+    unsigned long val;                      \
+    if (task == current)                    \
+        val = READ_ONCE(x);                 \
+    else                                    \
+        val = READ_ONCE_NOCHECK(x);         \
+    val;                                    \
+})
 static void unwind_dump(struct unwind_state *state, unsigned long *sp)
 {
     static bool dumped_before = false;
@@ -48,7 +63,8 @@ unsigned long unwind_get_return_address(struct unwind_state *state)
     if (state->regs && user_mode(state->regs))
         return 0;
-    addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, *addr_p,
+    addr = READ_ONCE_TASK_STACK(state->task, *addr_p);
+    addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, addr,
                                  addr_p);
     return __kernel_text_address(addr) ? addr : 0;
@@ -162,7 +178,7 @@ bool unwind_next_frame(struct unwind_state *state)
     if (state->regs)
         next_bp = (unsigned long *)state->regs->bp;
     else
-        next_bp = (unsigned long *)*state->bp;
+        next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task,*state->bp);
     /* is the next frame pointer an encoded pointer to pt_regs? */
     regs = decode_frame_pointer(next_bp);
@@ -207,6 +223,16 @@ bool unwind_next_frame(struct unwind_state *state)
     return true;
 bad_address:
+    /*
+     * When unwinding a non-current task, the task might actually be
+     * running on another CPU, in which case it could be modifying its
+     * stack while we're reading it. This is generally not a problem and
+     * can be ignored as long as the caller understands that unwinding
+     * another task will not always succeed.
+     */
+    if (state->task != current)
+        goto the_end;
     if (state->regs) {
         printk_deferred_once(KERN_WARNING
             "WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",