Merge branch 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 cpu updates from Ingo Molnar:
 "The main changes in this cycle were:

   - Improved CPU ID handling code and related enhancements (Borislav
     Petkov)

   - RDRAND fix (Len Brown)"

* 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86: Replace RDRAND forced-reseed with simple sanity check
  x86/MSR: Chop off lower 32-bit value
  x86/cpu: Fix MSR value truncation issue
  x86/cpu/amd, kvm: Satisfy guest kernel reads of IC_CFG MSR
  kvm: Add accessors for guest CPU's family, model, stepping
  x86/cpu: Unify CPU family, model, stepping calculation
arch/x86/include/asm/cpu.h
@@ -36,4 +36,7 @@ extern int _debug_hotplug_cpu(int cpu, int action);
 
 int mwait_usable(const struct cpuinfo_x86 *);
 
+unsigned int x86_family(unsigned int sig);
+unsigned int x86_model(unsigned int sig);
+unsigned int x86_stepping(unsigned int sig);
 #endif /* _ASM_X86_CPU_H */
arch/x86/include/asm/microcode.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_MICROCODE_H
 #define _ASM_X86_MICROCODE_H
 
+#include <asm/cpu.h>
 #include <linux/earlycpio.h>
 
 #define native_rdmsr(msr, val1, val2) \
@@ -95,14 +96,14 @@ static inline void __exit exit_amd_microcode(void) {}
 
 /*
  * In early loading microcode phase on BSP, boot_cpu_data is not set up yet.
- * x86_vendor() gets vendor id for BSP.
+ * x86_cpuid_vendor() gets vendor id for BSP.
  *
  * In 32 bit AP case, accessing boot_cpu_data needs linear address. To simplify
- * coding, we still use x86_vendor() to get vendor id for AP.
+ * coding, we still use x86_cpuid_vendor() to get vendor id for AP.
  *
- * x86_vendor() gets vendor information directly from CPUID.
+ * x86_cpuid_vendor() gets vendor information directly from CPUID.
  */
-static inline int x86_vendor(void)
+static inline int x86_cpuid_vendor(void)
 {
         u32 eax = 0x00000000;
         u32 ebx, ecx = 0, edx;
@@ -118,40 +119,14 @@ static inline int x86_vendor(void)
         return X86_VENDOR_UNKNOWN;
 }
 
-static inline unsigned int __x86_family(unsigned int sig)
-{
-        unsigned int x86;
-
-        x86 = (sig >> 8) & 0xf;
-
-        if (x86 == 0xf)
-                x86 += (sig >> 20) & 0xff;
-
-        return x86;
-}
-
-static inline unsigned int x86_family(void)
+static inline unsigned int x86_cpuid_family(void)
 {
         u32 eax = 0x00000001;
         u32 ebx, ecx = 0, edx;
 
         native_cpuid(&eax, &ebx, &ecx, &edx);
 
-        return __x86_family(eax);
-}
-
-static inline unsigned int x86_model(unsigned int sig)
-{
-        unsigned int x86, model;
-
-        x86 = __x86_family(sig);
-
-        model = (sig >> 4) & 0xf;
-
-        if (x86 == 0x6 || x86 == 0xf)
-                model += ((sig >> 16) & 0xf) << 4;
-
-        return model;
+        return x86_family(eax);
 }
 
 #ifdef CONFIG_MICROCODE
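An aside on what the renamed helper does: x86_cpuid_vendor() identifies the vendor straight from CPUID leaf 0 rather than from boot_cpu_data, which is not set up yet during early microcode loading. Below is a rough userspace sketch of the same decode, using GCC's <cpuid.h> helper instead of the kernel's native_cpuid(); it is an illustration only, not part of the patch.

    #include <cpuid.h>
    #include <stdio.h>
    #include <string.h>

    /* The 12-byte vendor string from CPUID leaf 0 lives in EBX, EDX, ECX
     * (in that order). */
    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;
        char vendor[13];

        if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
            return 1;

        memcpy(vendor + 0, &ebx, 4);
        memcpy(vendor + 4, &edx, 4);
        memcpy(vendor + 8, &ecx, 4);
        vendor[12] = '\0';

        /* Prints e.g. "GenuineIntel" or "AuthenticAMD". */
        printf("%s\n", vendor);
        return 0;
    }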
arch/x86/include/asm/msr-index.h
@@ -321,6 +321,7 @@
 #define MSR_F15H_PERF_CTR		0xc0010201
 #define MSR_F15H_NB_PERF_CTL		0xc0010240
 #define MSR_F15H_NB_PERF_CTR		0xc0010241
+#define MSR_F15H_IC_CFG			0xc0011021
 
 /* Fam 10h MSRs */
 #define MSR_FAM10H_MMIO_CONF_BASE	0xc0010058
arch/x86/include/asm/msr.h
@@ -221,7 +221,7 @@ static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
 
 static inline void wrmsrl(unsigned msr, u64 val)
 {
-        native_write_msr(msr, (u32)val, (u32)(val >> 32));
+        native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
 }
 
 /* wrmsr with exception handling */
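The wrmsrl() change is behavior-preserving: casting a u64 to u32 already truncates to the low 32 bits, and the explicit & 0xffffffffULL mask only documents that truncation. A quick standalone check of the equivalence (illustration only):

    #include <assert.h>
    #include <stdint.h>

    /* Splitting a 64-bit MSR value into the EDX:EAX halves that WRMSR
     * expects. The bare cast and the masked cast yield identical bits;
     * the masked form just states the truncation out loud. */
    int main(void)
    {
        uint64_t val = 0xdeadbeefcafebabeULL;

        uint32_t lo_cast   = (uint32_t)val;
        uint32_t lo_masked = (uint32_t)(val & 0xffffffffULL);
        uint32_t hi        = (uint32_t)(val >> 32);

        assert(lo_cast == lo_masked);   /* 0xcafebabe either way */
        assert(hi == 0xdeadbeef);
        return 0;
    }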
arch/x86/kernel/cpu/amd.c
@@ -678,9 +678,9 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
 	 * Disable it on the affected CPUs.
 	 */
 	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
-		if (!rdmsrl_safe(0xc0011021, &value) && !(value & 0x1E)) {
+		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
 			value |= 0x1E;
-			wrmsrl_safe(0xc0011021, value);
+			wrmsrl_safe(MSR_F15H_IC_CFG, value);
 		}
 	}
 }
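For reference, the register this workaround touches can also be inspected from userspace through the msr driver, which exposes MSRs of CPU N at /dev/cpu/N/msr with the MSR address passed as the file offset. A minimal sketch, assuming root, a loaded msr module, and an AMD Fam15h CPU (the read fails with EIO elsewhere); this mirrors the rdmsrl_safe() call above but is not part of the patch:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MSR_F15H_IC_CFG 0xc0011021

    int main(void)
    {
        uint64_t value;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0)
            return 1;
        /* The MSR address is the pread offset. */
        if (pread(fd, &value, sizeof(value), MSR_F15H_IC_CFG) != sizeof(value))
            return 1;

        printf("IC_CFG = %#llx, erratum bits 0x1E %s set\n",
               (unsigned long long)value, (value & 0x1E) ? "already" : "not");
        close(fd);
        return 0;
    }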
arch/x86/kernel/cpu/common.c
@@ -581,14 +581,9 @@ void cpu_detect(struct cpuinfo_x86 *c)
 		u32 junk, tfms, cap0, misc;
 
 		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
-		c->x86 = (tfms >> 8) & 0xf;
-		c->x86_model = (tfms >> 4) & 0xf;
-		c->x86_mask = tfms & 0xf;
+		c->x86 = x86_family(tfms);
+		c->x86_model = x86_model(tfms);
+		c->x86_mask = x86_stepping(tfms);
 
-		if (c->x86 == 0xf)
-			c->x86 += (tfms >> 20) & 0xff;
-		if (c->x86 >= 0x6)
-			c->x86_model += ((tfms >> 16) & 0xf) << 4;
-
 		if (cap0 & (1<<19)) {
 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
@@ -1187,7 +1182,7 @@ void syscall_init(void)
 	 * They both write to the same internal register. STAR allows to
 	 * set CS/DS but only a 32bit target. LSTAR sets the 64bit rip.
 	 */
-	wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
+	wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
 	wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
 
 #ifdef CONFIG_IA32_EMULATION
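The MSR_STAR change writes the same register image with wrmsr() instead of building a u64 for wrmsrl(): bits 63:48 hold the SYSRET selector base and bits 47:32 the SYSCALL selector base, so the high word is (__USER32_CS << 16) | __KERNEL_CS and the low word is zero. A standalone check that the two encodings agree, with placeholder selector values (the real ones come from the kernel's GDT layout):

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical selectors for illustration only. */
    #define KERNEL_CS 0x10u
    #define USER32_CS 0x23u

    int main(void)
    {
        /* Old form: one 64-bit image, split by wrmsrl(). */
        uint64_t star = ((uint64_t)USER32_CS << 48) | ((uint64_t)KERNEL_CS << 32);

        /* New form: EAX/EDX halves passed directly to wrmsr(). */
        uint32_t lo = 0;
        uint32_t hi = (USER32_CS << 16) | KERNEL_CS;

        assert(star == (((uint64_t)hi << 32) | lo)); /* identical image */
        return 0;
    }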
arch/x86/kernel/cpu/microcode/core.c
@@ -129,8 +129,8 @@ void __init load_ucode_bsp(void)
 	if (!have_cpuid_p())
 		return;
 
-	vendor = x86_vendor();
-	family = x86_family();
+	vendor = x86_cpuid_vendor();
+	family = x86_cpuid_family();
 
 	switch (vendor) {
 	case X86_VENDOR_INTEL:
@@ -165,8 +165,8 @@ void load_ucode_ap(void)
 	if (!have_cpuid_p())
 		return;
 
-	vendor = x86_vendor();
-	family = x86_family();
+	vendor = x86_cpuid_vendor();
+	family = x86_cpuid_family();
 
 	switch (vendor) {
 	case X86_VENDOR_INTEL:
@@ -206,8 +206,8 @@ void reload_early_microcode(void)
 {
 	int vendor, family;
 
-	vendor = x86_vendor();
-	family = x86_family();
+	vendor = x86_cpuid_vendor();
+	family = x86_cpuid_family();
 
 	switch (vendor) {
 	case X86_VENDOR_INTEL:
arch/x86/kernel/cpu/microcode/intel.c
@@ -145,10 +145,10 @@ matching_model_microcode(struct microcode_header_intel *mc_header,
 	int ext_sigcount, i;
 	struct extended_signature *ext_sig;
 
-	fam   = __x86_family(sig);
+	fam   = x86_family(sig);
 	model = x86_model(sig);
 
-	fam_ucode   = __x86_family(mc_header->sig);
+	fam_ucode   = x86_family(mc_header->sig);
 	model_ucode = x86_model(mc_header->sig);
 
 	if (fam == fam_ucode && model == model_ucode)
@@ -163,7 +163,7 @@ matching_model_microcode(struct microcode_header_intel *mc_header,
 	ext_sigcount = ext_header->count;
 
 	for (i = 0; i < ext_sigcount; i++) {
-		fam_ucode   = __x86_family(ext_sig->sig);
+		fam_ucode   = x86_family(ext_sig->sig);
 		model_ucode = x86_model(ext_sig->sig);
 
 		if (fam == fam_ucode && model == model_ucode)
@@ -365,7 +365,7 @@ static int collect_cpu_info_early(struct ucode_cpu_info *uci)
 	native_cpuid(&eax, &ebx, &ecx, &edx);
 	csig.sig = eax;
 
-	family = __x86_family(csig.sig);
+	family = x86_family(csig.sig);
 	model  = x86_model(csig.sig);
 
 	if ((model >= 5) || (family > 6)) {
@@ -521,16 +521,12 @@ static bool __init load_builtin_intel_microcode(struct cpio_data *cp)
 {
 #ifdef CONFIG_X86_64
 	unsigned int eax = 0x00000001, ebx, ecx = 0, edx;
-	unsigned int family, model, stepping;
 	char name[30];
 
 	native_cpuid(&eax, &ebx, &ecx, &edx);
 
-	family   = __x86_family(eax);
-	model    = x86_model(eax);
-	stepping = eax & 0xf;
-
-	sprintf(name, "intel-ucode/%02x-%02x-%02x", family, model, stepping);
+	sprintf(name, "intel-ucode/%02x-%02x-%02x",
+		      x86_family(eax), x86_model(eax), x86_stepping(eax));
 
 	return get_builtin_firmware(cp, name);
 #else
arch/x86/kernel/cpu/rdrand.c
@@ -33,28 +33,27 @@ static int __init x86_rdrand_setup(char *s)
 __setup("nordrand", x86_rdrand_setup);
 
 /*
- * Force a reseed cycle; we are architecturally guaranteed a reseed
- * after no more than 512 128-bit chunks of random data.  This also
- * acts as a test of the CPU capability.
+ * RDRAND has Built-In-Self-Test (BIST) that runs on every invocation.
+ * Run the instruction a few times as a sanity check.
+ * If it fails, it is simple to disable RDRAND here.
  */
-#define RESEED_LOOP ((512*128)/sizeof(unsigned long))
+#define SANITY_CHECK_LOOPS 8
 
 void x86_init_rdrand(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_ARCH_RANDOM
 	unsigned long tmp;
-	int i, count, ok;
+	int i;
 
 	if (!cpu_has(c, X86_FEATURE_RDRAND))
-		return;		/* Nothing to do */
+		return;
 
-	for (count = i = 0; i < RESEED_LOOP; i++) {
-		ok = rdrand_long(&tmp);
-		if (ok)
-			count++;
-	}
-
-	if (count != RESEED_LOOP)
-		clear_cpu_cap(c, X86_FEATURE_RDRAND);
+	for (i = 0; i < SANITY_CHECK_LOOPS; i++) {
+		if (!rdrand_long(&tmp)) {
+			clear_cpu_cap(c, X86_FEATURE_RDRAND);
+			printk_once(KERN_WARNING "rdrand: disabled\n");
+			return;
+		}
+	}
 #endif
 }
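The new loop leans on the architectural contract that RDRAND may fail transiently (CF=0) but a functioning unit succeeds within a few retries, so a handful of straight successes is a functional sanity check rather than a statistical test of the output. A userspace analogue using the compiler intrinsic, shown as an illustration only (build with -mrdrnd on a 64-bit target):

    #include <immintrin.h>
    #include <stdio.h>

    /* Retry a few times per read, since a transient CF=0 is allowed;
     * persistent failure means the hardware should be treated as broken. */
    static int rdrand64_retry(unsigned long long *out)
    {
        for (int i = 0; i < 10; i++)
            if (_rdrand64_step(out))
                return 1;
        return 0;
    }

    int main(void)
    {
        unsigned long long r;

        if (!rdrand64_retry(&r)) {
            fprintf(stderr, "rdrand: disabled\n");
            return 1;
        }
        printf("%#llx\n", r);
        return 0;
    }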
arch/x86/kvm/cpuid.h
@@ -2,6 +2,7 @@
 #define ARCH_X86_KVM_CPUID_H
 
 #include "x86.h"
+#include <asm/cpu.h>
 
 int kvm_update_cpuid(struct kvm_vcpu *vcpu);
 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
@@ -178,4 +179,37 @@ static inline bool guest_cpuid_has_nrips(struct kvm_vcpu *vcpu)
 }
 #undef BIT_NRIPS
 
+static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
+	if (!best)
+		return -1;
+
+	return x86_family(best->eax);
+}
+
+static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
+	if (!best)
+		return -1;
+
+	return x86_model(best->eax);
+}
+
+static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
+	if (!best)
+		return -1;
+
+	return x86_stepping(best->eax);
+}
+
 #endif
arch/x86/kvm/svm.c
@@ -3053,6 +3053,23 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_UCODE_REV:
 		msr_info->data = 0x01000065;
 		break;
+	case MSR_F15H_IC_CFG: {
+
+		int family, model;
+
+		family = guest_cpuid_family(vcpu);
+		model  = guest_cpuid_model(vcpu);
+
+		if (family < 0 || model < 0)
+			return kvm_get_msr_common(vcpu, msr_info);
+
+		msr_info->data = 0;
+
+		if (family == 0x15 &&
+		    (model >= 0x2 && model < 0x20))
+			msr_info->data = 0x1E;
+		}
+		break;
 	default:
 		return kvm_get_msr_common(vcpu, msr_info);
 	}
arch/x86/lib/Makefile
@@ -16,7 +16,7 @@ clean-files := inat-tables.c
 
 obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
 
-lib-y := delay.o misc.o cmdline.o
+lib-y := delay.o misc.o cmdline.o cpu.o
 lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
 lib-y += memcpy_$(BITS).o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
arch/x86/lib/cpu.c (new file, 35 lines)
@@ -0,0 +1,35 @@
+#include <linux/module.h>
+
+unsigned int x86_family(unsigned int sig)
+{
+	unsigned int x86;
+
+	x86 = (sig >> 8) & 0xf;
+
+	if (x86 == 0xf)
+		x86 += (sig >> 20) & 0xff;
+
+	return x86;
+}
+EXPORT_SYMBOL_GPL(x86_family);
+
+unsigned int x86_model(unsigned int sig)
+{
+	unsigned int fam, model;
+
+	fam = x86_family(sig);
+
+	model = (sig >> 4) & 0xf;
+
+	if (fam >= 0x6)
+		model += ((sig >> 16) & 0xf) << 4;
+
+	return model;
+}
+EXPORT_SYMBOL_GPL(x86_model);
+
+unsigned int x86_stepping(unsigned int sig)
+{
+	return sig & 0xf;
+}
+EXPORT_SYMBOL_GPL(x86_stepping);
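The leaf 1 signature these helpers decode packs stepping in bits 3:0, base model in 7:4, base family in 11:8, extended model in 19:16 and extended family in 27:20; the extended family only kicks in for family 0xf, and the extended model for family 0x6 and up, as implemented above. A standalone sanity check against two well-known public signatures (the helpers are copied so it compiles outside the kernel; illustration only):

    #include <assert.h>

    /* Standalone copies of the helpers above. */
    static unsigned int family(unsigned int sig)
    {
        unsigned int x86 = (sig >> 8) & 0xf;
        return (x86 == 0xf) ? x86 + ((sig >> 20) & 0xff) : x86;
    }

    static unsigned int model(unsigned int sig)
    {
        unsigned int m = (sig >> 4) & 0xf;
        if (family(sig) >= 0x6)
            m += ((sig >> 16) & 0xf) << 4;
        return m;
    }

    int main(void)
    {
        /* 0x000306c3: Intel Haswell -> family 0x06, model 0x3c, stepping 3 */
        assert(family(0x000306c3) == 0x06 && model(0x000306c3) == 0x3c);
        /* 0x00600f12: AMD Fam15h   -> family 0x15, model 0x01, stepping 2 */
        assert(family(0x00600f12) == 0x15 && model(0x00600f12) == 0x01);
        assert((0x000306c3 & 0xf) == 3 && (0x00600f12 & 0xf) == 2);
        return 0;
    }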