Merge branch 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 PTI and Spectre related fixes and updates from Ingo Molnar:
 "Here's the latest set of Spectre and PTI related fixes and updates:

  Spectre:
   - Add entry code register clearing to reduce the Spectre attack surface
   - Update the Spectre microcode blacklist
   - Inline the KVM Spectre helpers to get close to v4.14 performance again
   - Fix indirect_branch_prediction_barrier()
   - Fix/improve Spectre related kernel messages
   - Fix array_index_nospec_mask() asm constraint
   - KVM: fix two MSR handling bugs

  PTI:
   - Fix a paranoid entry PTI CR3 handling bug
   - Fix comments

  objtool:
   - Fix paranoid_entry() frame pointer warning
   - Annotate WARN()-related UD2 as reachable
   - Various fixes
   - Add Peter Zijlstra as objtool co-maintainer

  Misc:
   - Various x86 entry code self-test fixes
   - Improve/simplify entry code stack frame generation and handling
     after recent heavy-handed PTI and Spectre changes. (There's two
     more WIP improvements expected here.)
   - Type fix for cache entries

  There's also some low risk non-fix changes I've included in this
  branch to reduce backporting conflicts:
   - rename a confusing x86_cpu field name
   - de-obfuscate the naming of single-TLB flushing primitives"

* 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (41 commits)
  x86/entry/64: Fix CR3 restore in paranoid_exit()
  x86/cpu: Change type of x86_cache_size variable to unsigned int
  x86/spectre: Fix an error message
  x86/cpu: Rename cpu_data.x86_mask to cpu_data.x86_stepping
  selftests/x86/mpx: Fix incorrect bounds with old _sigfault
  x86/mm: Rename flush_tlb_single() and flush_tlb_one() to __flush_tlb_one_[user|kernel]()
  x86/speculation: Add <asm/msr-index.h> dependency
  nospec: Move array_index_nospec() parameter checking into separate macro
  x86/speculation: Fix up array_index_nospec_mask() asm constraint
  x86/debug: Use UD2 for WARN()
  x86/debug, objtool: Annotate WARN()-related UD2 as reachable
  objtool: Fix segfault in ignore_unreachable_insn()
  selftests/x86: Disable tests requiring 32-bit support on pure 64-bit systems
  selftests/x86: Do not rely on "int $0x80" in single_step_syscall.c
  selftests/x86: Do not rely on "int $0x80" in test_mremap_vdso.c
  selftests/x86: Fix build bug caused by the 5lvl test which has been moved to the VM directory
  selftests/x86/pkeys: Remove unused functions
  selftests/x86: Clean up and document sscanf() usage
  selftests/x86: Fix vDSO selftest segfault for vsyscall=none
  x86/entry/64: Remove the unused 'icebp' macro
  ...
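Most of the diff below is the mechanical cpu_data.x86_mask -> cpu_data.x86_stepping rename. The old name suggested a bitmask, but the field actually holds the CPUID stepping of the processor. As a minimal sketch of where that value comes from (plain user-space C, not kernel code; the kernel uses the x86_stepping() helper visible in the cpu_detect() hunk below), the stepping is the low nibble of the CPUID leaf 1 signature:

    #include <stdio.h>
    #include <cpuid.h>      /* GCC/Clang wrapper around the CPUID instruction */

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* Leaf 1: EAX holds the processor signature. */
            if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                    return 1;

            unsigned int stepping = eax & 0xf;        /* bits 3:0  -> x86_stepping */
            unsigned int model    = (eax >> 4) & 0xf; /* bits 7:4  -> x86_model (base) */
            unsigned int family   = (eax >> 8) & 0xf; /* bits 11:8 -> x86 (base family) */

            /* The kernel additionally folds in the extended family/model
             * bits for family 0x6/0xf parts; that detail is omitted here. */
            printf("family 0x%x, model 0x%x, stepping 0x%x\n",
                   family, model, stepping);
            return 0;
    }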
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -119,7 +119,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
 			return;
 	}
 
-	if (c->x86_model == 6 && c->x86_mask == 1) {
+	if (c->x86_model == 6 && c->x86_stepping == 1) {
 		const int K6_BUG_LOOP = 1000000;
 		int n;
 		void (*f_vide)(void);
@@ -149,7 +149,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
 
 	/* K6 with old style WHCR */
 	if (c->x86_model < 8 ||
-	   (c->x86_model == 8 && c->x86_mask < 8)) {
+	   (c->x86_model == 8 && c->x86_stepping < 8)) {
 		/* We can only write allocate on the low 508Mb */
 		if (mbytes > 508)
 			mbytes = 508;
@@ -168,7 +168,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
 		return;
 	}
 
-	if ((c->x86_model == 8 && c->x86_mask > 7) ||
+	if ((c->x86_model == 8 && c->x86_stepping > 7) ||
 	     c->x86_model == 9 || c->x86_model == 13) {
 		/* The more serious chips .. */
 
@@ -221,7 +221,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
 	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
 	 * As per AMD technical note 27212 0.2
 	 */
-	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
+	if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
 		rdmsr(MSR_K7_CLK_CTL, l, h);
 		if ((l & 0xfff00000) != 0x20000000) {
 			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
@@ -241,12 +241,12 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
 	 * but they are not certified as MP capable.
 	 */
 	/* Athlon 660/661 is valid. */
-	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
-	    (c->x86_mask == 1)))
+	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
+	    (c->x86_stepping == 1)))
 		return;
 
 	/* Duron 670 is valid */
-	if ((c->x86_model == 7) && (c->x86_mask == 0))
+	if ((c->x86_model == 7) && (c->x86_stepping == 0))
 		return;
 
 	/*
@@ -256,8 +256,8 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
 	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
 	 * more.
 	 */
-	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
-	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
+	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
+	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
 	     (c->x86_model > 7))
 		if (cpu_has(c, X86_FEATURE_MP))
 			return;
@@ -628,7 +628,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
 	/* Set MTRR capability flag if appropriate */
 	if (c->x86 == 5)
 		if (c->x86_model == 13 || c->x86_model == 9 ||
-		    (c->x86_model == 8 && c->x86_mask >= 8))
+		    (c->x86_model == 8 && c->x86_stepping >= 8))
 			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 #endif
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
@@ -795,7 +795,7 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
 	 * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
 	 * all up to and including B1.
 	 */
-	if (c->x86_model <= 1 && c->x86_mask <= 1)
+	if (c->x86_model <= 1 && c->x86_stepping <= 1)
 		set_cpu_cap(c, X86_FEATURE_CPB);
 }
 
@@ -906,11 +906,11 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 	/* AMD errata T13 (order #21922) */
 	if ((c->x86 == 6)) {
 		/* Duron Rev A0 */
-		if (c->x86_model == 3 && c->x86_mask == 0)
+		if (c->x86_model == 3 && c->x86_stepping == 0)
 			size = 64;
 		/* Tbird rev A1/A2 */
 		if (c->x86_model == 4 &&
-		    (c->x86_mask == 0 || c->x86_mask == 1))
+		    (c->x86_stepping == 0 || c->x86_stepping == 1))
 			size = 256;
 	}
 	return size;
@@ -1047,7 +1047,7 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
 	}
 
 	/* OSVW unavailable or ID unknown, match family-model-stepping range */
-	ms = (cpu->x86_model << 4) | cpu->x86_mask;
+	ms = (cpu->x86_model << 4) | cpu->x86_stepping;
 	while ((range = *erratum++))
 		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
 		    (ms >= AMD_MODEL_RANGE_START(range)) &&
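The cpu_has_amd_erratum() hunk above keys its erratum tables on a packed (model << 4) | stepping value compared against per-family ranges. Here is a self-contained sketch of that range check; the bit layout of the range word is an assumption chosen to be consistent with the packed key, not a copy of the kernel's AMD_MODEL_RANGE_* macros:

    /* Hypothetical packing, consistent with ms = (model << 4) | stepping:
     * family in bits 31:24, range start in bits 23:12, range end in bits
     * 11:0, where start/end are themselves (model << 4) | stepping values. */
    #define MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
            (((f) << 24) | ((m_start) << 16) | ((s_start) << 12) | \
             ((m_end) << 4) | (s_end))
    #define MODEL_RANGE_FAMILY(r)   (((r) >> 24) & 0xff)
    #define MODEL_RANGE_START(r)    (((r) >> 12) & 0xfff)
    #define MODEL_RANGE_END(r)      ((r) & 0xfff)

    static int in_erratum_range(unsigned int family, unsigned int model,
                                unsigned int stepping, unsigned int range)
    {
            unsigned int ms = (model << 4) | stepping;

            return family == MODEL_RANGE_FAMILY(range) &&
                   ms >= MODEL_RANGE_START(range) &&
                   ms <= MODEL_RANGE_END(range);
    }

Packing model and stepping into one integer lets a single pair of comparisons express a span such as "model 8 stepping 1 through model 10 stepping 2" without nested conditionals.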
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -162,8 +162,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
 		return SPECTRE_V2_CMD_NONE;
 	else {
-		ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
-					  sizeof(arg));
+		ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
 		if (ret < 0)
 			return SPECTRE_V2_CMD_AUTO;
 
@@ -175,8 +174,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 		}
 
 		if (i >= ARRAY_SIZE(mitigation_options)) {
-			pr_err("unknown option (%s). Switching to AUTO select\n",
-			       mitigation_options[i].option);
+			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
 			return SPECTRE_V2_CMD_AUTO;
 		}
 	}
@@ -185,8 +183,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 	     cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
 	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
 	    !IS_ENABLED(CONFIG_RETPOLINE)) {
-		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
-		       mitigation_options[i].option);
+		pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
 		return SPECTRE_V2_CMD_AUTO;
 	}
 
@@ -256,14 +253,14 @@ static void __init spectre_v2_select_mitigation(void)
 			goto retpoline_auto;
 		break;
 	}
-	pr_err("kernel not compiled with retpoline; no mitigation available!");
+	pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
 	return;
 
 retpoline_auto:
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
 	retpoline_amd:
 		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
-			pr_err("LFENCE not serializing. Switching to generic retpoline\n");
+			pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
 			goto retpoline_generic;
 		}
 		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
@@ -281,7 +278,7 @@ retpoline_auto:
 	pr_info("%s\n", spectre_v2_strings[mode]);
 
 	/*
-	 * If neither SMEP or KPTI are available, there is a risk of
+	 * If neither SMEP nor PTI are available, there is a risk of
 	 * hitting userspace addresses in the RSB after a context switch
 	 * from a shallow call stack to a deeper one. To prevent this fill
 	 * the entire RSB, even when using IBRS.
@@ -295,21 +292,20 @@ retpoline_auto:
 	if ((!boot_cpu_has(X86_FEATURE_PTI) &&
 	     !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
 		setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
-		pr_info("Filling RSB on context switch\n");
+		pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
 	}
 
 	/* Initialize Indirect Branch Prediction Barrier if supported */
 	if (boot_cpu_has(X86_FEATURE_IBPB)) {
 		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
-		pr_info("Enabling Indirect Branch Prediction Barrier\n");
+		pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
 	}
 }
 
 #undef pr_fmt
 
 #ifdef CONFIG_SYSFS
-ssize_t cpu_show_meltdown(struct device *dev,
-			  struct device_attribute *attr, char *buf)
+ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
 		return sprintf(buf, "Not affected\n");
@@ -318,16 +314,14 @@ ssize_t cpu_show_meltdown(struct device *dev,
 	return sprintf(buf, "Vulnerable\n");
 }
 
-ssize_t cpu_show_spectre_v1(struct device *dev,
-			    struct device_attribute *attr, char *buf)
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
 		return sprintf(buf, "Not affected\n");
 	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
 }
 
-ssize_t cpu_show_spectre_v2(struct device *dev,
-			    struct device_attribute *attr, char *buf)
+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
 		return sprintf(buf, "Not affected\n");
@@ -337,9 +331,3 @@ ssize_t cpu_show_spectre_v2(struct device *dev,
 		       spectre_v2_module_string());
 }
 #endif
-
-void __ibp_barrier(void)
-{
-	__wrmsr(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0);
-}
-EXPORT_SYMBOL_GPL(__ibp_barrier);
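Several hunks above improve the Spectre kernel messages by spelling a "Spectre mitigation:" prefix into each string, and the #undef pr_fmt in the last hunk is the tail end of the related printk idiom: a file (or a section of one) can define pr_fmt() so that every pr_*() call picks up a common prefix automatically. A minimal sketch of the mechanism; the prefix string here is illustrative, not necessarily what bugs.c defines at this point:

    /* Before the first pr_*() use in the file: */
    #define pr_fmt(fmt) "Spectre V2 : " fmt        /* illustrative prefix */

    #include <linux/printk.h>

    static void example(void)
    {
            /* Expands to printk(KERN_ERR "Spectre V2 : " "LFENCE not serializing\n") */
            pr_err("LFENCE not serializing\n");
    }

    /* A later #undef pr_fmt lets the file switch prefixes for the next section. */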
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -140,7 +140,7 @@ static void init_centaur(struct cpuinfo_x86 *c)
 			clear_cpu_cap(c, X86_FEATURE_TSC);
 			break;
 		case 8:
-			switch (c->x86_mask) {
+			switch (c->x86_stepping) {
 			default:
 			name = "2";
 				break;
@@ -215,7 +215,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 	 * - Note, it seems this may only be in engineering samples.
 	 */
 	if ((c->x86 == 6) && (c->x86_model == 9) &&
-				(c->x86_mask == 1) && (size == 65))
+				(c->x86_stepping == 1) && (size == 65))
 		size -= 1;
 	return size;
 }
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -731,7 +731,7 @@ void cpu_detect(struct cpuinfo_x86 *c)
 		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
 		c->x86 = x86_family(tfms);
 		c->x86_model = x86_model(tfms);
-		c->x86_mask = x86_stepping(tfms);
+		c->x86_stepping = x86_stepping(tfms);
 
 		if (cap0 & (1<<19)) {
 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
@@ -1184,9 +1184,9 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 	int i;
 
 	c->loops_per_jiffy = loops_per_jiffy;
-	c->x86_cache_size = -1;
+	c->x86_cache_size = 0;
 	c->x86_vendor = X86_VENDOR_UNKNOWN;
-	c->x86_model = c->x86_mask = 0; /* So far unknown... */
+	c->x86_model = c->x86_stepping = 0; /* So far unknown... */
 	c->x86_vendor_id[0] = '\0'; /* Unset */
 	c->x86_model_id[0] = '\0'; /* Unset */
 	c->x86_max_cores = 1;
@@ -1378,8 +1378,8 @@ void print_cpu_info(struct cpuinfo_x86 *c)
 
 	pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
 
-	if (c->x86_mask || c->cpuid_level >= 0)
-		pr_cont(", stepping: 0x%x)\n", c->x86_mask);
+	if (c->x86_stepping || c->cpuid_level >= 0)
+		pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
 	else
 		pr_cont(")\n");
 }
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -215,7 +215,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
 
 	/* common case step number/rev -- exceptions handled below */
 	c->x86_model = (dir1 >> 4) + 1;
-	c->x86_mask = dir1 & 0xf;
+	c->x86_stepping = dir1 & 0xf;
 
 	/* Now cook; the original recipe is by Channing Corn, from Cyrix.
 	 * We do the same thing for each generation: we work out
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -116,14 +116,13 @@ struct sku_microcode {
 	u32 microcode;
 };
 static const struct sku_microcode spectre_bad_microcodes[] = {
-	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x0B,	0x84 },
-	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x0A,	0x84 },
-	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x09,	0x84 },
-	{ INTEL_FAM6_KABYLAKE_MOBILE,	0x0A,	0x84 },
-	{ INTEL_FAM6_KABYLAKE_MOBILE,	0x09,	0x84 },
+	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x0B,	0x80 },
+	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x0A,	0x80 },
+	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x09,	0x80 },
+	{ INTEL_FAM6_KABYLAKE_MOBILE,	0x0A,	0x80 },
+	{ INTEL_FAM6_KABYLAKE_MOBILE,	0x09,	0x80 },
 	{ INTEL_FAM6_SKYLAKE_X,		0x03,	0x0100013e },
 	{ INTEL_FAM6_SKYLAKE_X,		0x04,	0x0200003c },
 	{ INTEL_FAM6_SKYLAKE_MOBILE,	0x03,	0xc2 },
 	{ INTEL_FAM6_SKYLAKE_DESKTOP,	0x03,	0xc2 },
 	{ INTEL_FAM6_BROADWELL_CORE,	0x04,	0x28 },
 	{ INTEL_FAM6_BROADWELL_GT3E,	0x01,	0x1b },
@@ -136,8 +135,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = {
 	{ INTEL_FAM6_HASWELL_X,		0x02,	0x3b },
 	{ INTEL_FAM6_HASWELL_X,		0x04,	0x10 },
 	{ INTEL_FAM6_IVYBRIDGE_X,	0x04,	0x42a },
-	/* Updated in the 20180108 release; blacklist until we know otherwise */
-	{ INTEL_FAM6_ATOM_GEMINI_LAKE,	0x01,	0x22 },
 	/* Observed in the wild */
 	{ INTEL_FAM6_SANDYBRIDGE_X,	0x06,	0x61b },
 	{ INTEL_FAM6_SANDYBRIDGE_X,	0x07,	0x712 },
@@ -149,7 +146,7 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
 
 	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
 		if (c->x86_model == spectre_bad_microcodes[i].model &&
-		    c->x86_mask == spectre_bad_microcodes[i].stepping)
+		    c->x86_stepping == spectre_bad_microcodes[i].stepping)
 			return (c->microcode <= spectre_bad_microcodes[i].microcode);
 	}
 	return false;
@@ -196,7 +193,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 	 * need the microcode to have already been loaded... so if it is
 	 * not, recommend a BIOS update and disable large pages.
 	 */
-	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
+	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
 	    c->microcode < 0x20e) {
 		pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
 		clear_cpu_cap(c, X86_FEATURE_PSE);
@@ -212,7 +209,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 
 	/* CPUID workaround for 0F33/0F34 CPU */
 	if (c->x86 == 0xF && c->x86_model == 0x3
-	    && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
+	    && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
 		c->x86_phys_bits = 36;
 
 	/*
@@ -310,7 +307,7 @@ int ppro_with_ram_bug(void)
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
 	    boot_cpu_data.x86 == 6 &&
 	    boot_cpu_data.x86_model == 1 &&
-	    boot_cpu_data.x86_mask < 8) {
+	    boot_cpu_data.x86_stepping < 8) {
 		pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
 		return 1;
 	}
@@ -327,7 +324,7 @@ static void intel_smp_check(struct cpuinfo_x86 *c)
 	 * Mask B, Pentium, but not Pentium MMX
 	 */
 	if (c->x86 == 5 &&
-	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
+	    c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
 	    c->x86_model <= 3) {
 		/*
 		 * Remember we have B step Pentia with bugs
@@ -370,7 +367,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
 	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
 	 * model 3 mask 3
 	 */
-	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
+	if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
 		clear_cpu_cap(c, X86_FEATURE_SEP);
 
 	/*
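The SEP workaround above packs family, model and stepping into one integer so that a single comparison expresses "older than family 6, model 3, stepping 3" (0x633). A worked example of the arithmetic (user-space C; the packing only works while each field fits its nibble):

    #include <stdio.h>

    /* Pack family/model/stepping the way intel_workarounds() does. */
    static unsigned int fms(unsigned int family, unsigned int model,
                            unsigned int stepping)
    {
            return family << 8 | model << 4 | stepping;
    }

    int main(void)
    {
            printf("0x%x\n", fms(6, 3, 3));        /* 0x633, the SEP cutoff */
            printf("%d\n", fms(6, 1, 9) < 0x633);  /* 1: SEP would be cleared */
            printf("%d\n", fms(6, 3, 4) < 0x633);  /* 0: SEP reported correctly */
            return 0;
    }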
@@ -388,7 +385,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
 	 * P4 Xeon erratum 037 workaround.
 	 * Hardware prefetcher may cause stale data to be loaded into the cache.
 	 */
-	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
+	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
 		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
 				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
 			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
@@ -403,7 +400,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
 	 * Specification Update").
 	 */
 	if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
-	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
+	    (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
 		set_cpu_bug(c, X86_BUG_11AP);
 
 
@@ -650,7 +647,7 @@ static void init_intel(struct cpuinfo_x86 *c)
 		case 6:
 			if (l2 == 128)
 				p = "Celeron (Mendocino)";
-			else if (c->x86_mask == 0 || c->x86_mask == 5)
+			else if (c->x86_stepping == 0 || c->x86_stepping == 5)
 				p = "Celeron-A";
 			break;
 
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -819,7 +819,7 @@ static __init void rdt_quirks(void)
 		cache_alloc_hsw_probe();
 		break;
 	case INTEL_FAM6_SKYLAKE_X:
-		if (boot_cpu_data.x86_mask <= 4)
+		if (boot_cpu_data.x86_stepping <= 4)
 			set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
 	}
 }
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -921,7 +921,7 @@ static bool is_blacklisted(unsigned int cpu)
 	 */
 	if (c->x86 == 6 &&
 	    c->x86_model == INTEL_FAM6_BROADWELL_X &&
-	    c->x86_mask == 0x01 &&
+	    c->x86_stepping == 0x01 &&
 	    llc_size_per_core > 2621440 &&
 	    c->microcode < 0x0b000021) {
 		pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
@@ -944,7 +944,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
 		return UCODE_NFOUND;
 
 	sprintf(name, "intel-ucode/%02x-%02x-%02x",
-		c->x86, c->x86_model, c->x86_mask);
+		c->x86, c->x86_model, c->x86_stepping);
 
 	if (request_firmware_direct(&firmware, name, device)) {
 		pr_debug("data file %s load failed\n", name);
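The rename is user-visible here, since the stepping forms part of the microcode firmware filename. A quick sketch using the same format string; the family/model/stepping values are an example (6/0x4e/0x03 is a Skylake mobile part):

    #include <stdio.h>

    int main(void)
    {
            char name[64];

            /* Same format as request_microcode_fw() above. */
            snprintf(name, sizeof(name), "intel-ucode/%02x-%02x-%02x",
                     6, 0x4e, 0x03);
            printf("%s\n", name);        /* intel-ucode/06-4e-03 */
            return 0;
    }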
@@ -982,7 +982,7 @@ static struct microcode_ops microcode_intel_ops = {
 
 static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
 {
-	u64 llc_size = c->x86_cache_size * 1024;
+	u64 llc_size = c->x86_cache_size * 1024ULL;
 
 	do_div(llc_size, c->x86_max_cores);
 
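The 1024ULL suffix goes hand in hand with the x86_cache_size type change: with an unsigned int operand, a plain * 1024 is evaluated in 32 bits and can wrap before the result is widened to u64. A minimal illustration (user-space C; the cache size is deliberately oversized to make the wrap visible):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            unsigned int cache_kb = 5u * 1024 * 1024;   /* 5 GiB LLC, in KiB */

            uint64_t wrong = cache_kb * 1024;    /* 32-bit multiply wraps first */
            uint64_t right = cache_kb * 1024ULL; /* 64-bit multiply, no wrap */

            printf("%llu vs %llu\n", (unsigned long long)wrong,
                   (unsigned long long)right);   /* 1073741824 vs 5368709120 */
            return 0;
    }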
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -859,7 +859,7 @@ int generic_validate_add_page(unsigned long base, unsigned long size,
 	 */
 	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
 	    boot_cpu_data.x86_model == 1 &&
-	    boot_cpu_data.x86_mask <= 7) {
+	    boot_cpu_data.x86_stepping <= 7) {
 		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
 			pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
 			return -EINVAL;
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -711,8 +711,8 @@ void __init mtrr_bp_init(void)
 			if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
 			    boot_cpu_data.x86 == 0xF &&
 			    boot_cpu_data.x86_model == 0x3 &&
-			    (boot_cpu_data.x86_mask == 0x3 ||
-			     boot_cpu_data.x86_mask == 0x4))
+			    (boot_cpu_data.x86_stepping == 0x3 ||
+			     boot_cpu_data.x86_stepping == 0x4))
 				phys_addr = 36;
 
 			size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -72,8 +72,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		   c->x86_model,
 		   c->x86_model_id[0] ? c->x86_model_id : "unknown");
 
-	if (c->x86_mask || c->cpuid_level >= 0)
-		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
+	if (c->x86_stepping || c->cpuid_level >= 0)
+		seq_printf(m, "stepping\t: %d\n", c->x86_stepping);
 	else
 		seq_puts(m, "stepping\t: unknown\n");
 	if (c->microcode)
@@ -91,8 +91,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	}
 
 	/* Cache size */
-	if (c->x86_cache_size >= 0)
-		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
+	if (c->x86_cache_size)
+		seq_printf(m, "cache size\t: %u KB\n", c->x86_cache_size);
 
 	show_cpuinfo_core(m, c, cpu);
 	show_cpuinfo_misc(m, c);
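The /proc/cpuinfo hunk also shows why the type change forces a new "unknown" sentinel: the old value of -1 stored in an unsigned field wraps to UINT_MAX, so the old >= 0 guard would always be true. identify_cpu() therefore now initializes the field to 0 and the guard becomes a plain truthiness test. A minimal illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned int cache_size = -1;   /* old sentinel in the new type */

            /* Always true for unsigned (compilers warn about this). */
            if (cache_size >= 0)
                    printf("cache size\t: %u KB\n", cache_size); /* 4294967295 */

            cache_size = 0;                 /* new sentinel: 0 means unknown */
            if (cache_size)
                    printf("cache size\t: %u KB\n", cache_size); /* skipped */
            return 0;
    }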