@@ -109,9 +109,6 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
 /* Control unconditional IBPB in switch_mm() */
 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
 
-/* Control MDS CPU buffer clear before returning to user space */
-DEFINE_STATIC_KEY_FALSE(mds_user_clear);
-EXPORT_SYMBOL_GPL(mds_user_clear);
 /* Control MDS CPU buffer clear before idling (halt, mwait) */
 DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
 EXPORT_SYMBOL_GPL(mds_idle_clear);
@@ -249,7 +246,7 @@ static void __init mds_select_mitigation(void)
 	if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
 		mds_mitigation = MDS_MITIGATION_VMWERV;
 
-	static_branch_enable(&mds_user_clear);
+	setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
 
 	if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
 	    (mds_nosmt || cpu_mitigations_auto_nosmt()))
@@ -353,7 +350,7 @@ static void __init taa_select_mitigation(void)
 	 * For guests that can't determine whether the correct microcode is
 	 * present on host, enable the mitigation for UCODE_NEEDED as well.
 	 */
-	static_branch_enable(&mds_user_clear);
+	setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
 
 	if (taa_nosmt || cpu_mitigations_auto_nosmt())
 		cpu_smt_disable(false);
@@ -421,7 +418,14 @@ static void __init mmio_select_mitigation(void)
 	 */
 	if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
 					      boot_cpu_has(X86_FEATURE_RTM)))
-		static_branch_enable(&mds_user_clear);
+		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+
+	/*
+	 * X86_FEATURE_CLEAR_CPU_BUF could be enabled by other VERW based
+	 * mitigations, disable KVM-only mitigation in that case.
+	 */
+	if (boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
+		static_branch_disable(&mmio_stale_data_clear);
 	else
 		static_branch_enable(&mmio_stale_data_clear);
 
@@ -473,6 +477,57 @@ static int __init mmio_stale_data_parse_cmdline(char *str)
 }
 early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
 
+#undef pr_fmt
+#define pr_fmt(fmt)	"Register File Data Sampling: " fmt
+
+enum rfds_mitigations {
+	RFDS_MITIGATION_OFF,
+	RFDS_MITIGATION_VERW,
+	RFDS_MITIGATION_UCODE_NEEDED,
+};
+
+/* Default mitigation for Register File Data Sampling */
+static enum rfds_mitigations rfds_mitigation __ro_after_init =
+	IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_VERW : RFDS_MITIGATION_OFF;
+
+static const char * const rfds_strings[] = {
+	[RFDS_MITIGATION_OFF]			= "Vulnerable",
+	[RFDS_MITIGATION_VERW]			= "Mitigation: Clear Register File",
+	[RFDS_MITIGATION_UCODE_NEEDED]		= "Vulnerable: No microcode",
+};
+
+static void __init rfds_select_mitigation(void)
+{
+	if (!boot_cpu_has_bug(X86_BUG_RFDS) || cpu_mitigations_off()) {
+		rfds_mitigation = RFDS_MITIGATION_OFF;
+		return;
+	}
+	if (rfds_mitigation == RFDS_MITIGATION_OFF)
+		return;
+
+	if (x86_read_arch_cap_msr() & ARCH_CAP_RFDS_CLEAR)
+		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+	else
+		rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
+}
+
+static __init int rfds_parse_cmdline(char *str)
+{
+	if (!str)
+		return -EINVAL;
+
+	if (!boot_cpu_has_bug(X86_BUG_RFDS))
+		return 0;
+
+	if (!strcmp(str, "off"))
+		rfds_mitigation = RFDS_MITIGATION_OFF;
+	else if (!strcmp(str, "on"))
+		rfds_mitigation = RFDS_MITIGATION_VERW;
+
+	return 0;
+}
+early_param("reg_file_data_sampling", rfds_parse_cmdline);
+
 #undef pr_fmt
 #define pr_fmt(fmt)	"" fmt
 
@@ -481,12 +536,12 @@ static void __init md_clear_update_mitigation(void)
 	if (cpu_mitigations_off())
 		return;
 
-	if (!static_key_enabled(&mds_user_clear))
+	if (!boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
 		goto out;
 
 	/*
-	 * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
-	 * mitigation, if necessary.
+	 * X86_FEATURE_CLEAR_CPU_BUF is now enabled. Update MDS, TAA and MMIO
+	 * Stale Data mitigation, if necessary.
 	 */
 	if (mds_mitigation == MDS_MITIGATION_OFF &&
 	    boot_cpu_has_bug(X86_BUG_MDS)) {
@@ -498,11 +553,19 @@ static void __init md_clear_update_mitigation(void)
 		taa_mitigation = TAA_MITIGATION_VERW;
 		taa_select_mitigation();
 	}
-	if (mmio_mitigation == MMIO_MITIGATION_OFF &&
-	    boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
+	/*
+	 * MMIO_MITIGATION_OFF is not checked here so that mmio_stale_data_clear
+	 * gets updated correctly as per X86_FEATURE_CLEAR_CPU_BUF state.
+	 */
+	if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
 		mmio_mitigation = MMIO_MITIGATION_VERW;
 		mmio_select_mitigation();
 	}
+	if (rfds_mitigation == RFDS_MITIGATION_OFF &&
+	    boot_cpu_has_bug(X86_BUG_RFDS)) {
+		rfds_mitigation = RFDS_MITIGATION_VERW;
+		rfds_select_mitigation();
+	}
 out:
 	if (boot_cpu_has_bug(X86_BUG_MDS))
 		pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
@@ -512,6 +575,8 @@ out:
 		pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
 	else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
 		pr_info("MMIO Stale Data: Unknown: No mitigations\n");
+	if (boot_cpu_has_bug(X86_BUG_RFDS))
+		pr_info("Register File Data Sampling: %s\n", rfds_strings[rfds_mitigation]);
 }
 
 static void __init md_clear_select_mitigation(void)
@@ -519,11 +584,12 @@ static void __init md_clear_select_mitigation(void)
 	mds_select_mitigation();
 	taa_select_mitigation();
 	mmio_select_mitigation();
+	rfds_select_mitigation();
 
 	/*
-	 * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update
-	 * and print their mitigation after MDS, TAA and MMIO Stale Data
-	 * mitigation selection is done.
+	 * As these mitigations are inter-related and rely on VERW instruction
+	 * to clear the microarchitural buffers, update and print their status
+	 * after mitigation selection is done for each of these vulnerabilities.
 	 */
 	md_clear_update_mitigation();
 }
@@ -1251,19 +1317,21 @@ spectre_v2_user_select_mitigation(void)
 	}
 
 	/*
-	 * If no STIBP, enhanced IBRS is enabled, or SMT impossible, STIBP
+	 * If no STIBP, Intel enhanced IBRS is enabled, or SMT impossible, STIBP
 	 * is not required.
 	 *
-	 * Enhanced IBRS also protects against cross-thread branch target
+	 * Intel's Enhanced IBRS also protects against cross-thread branch target
 	 * injection in user-mode as the IBRS bit remains always set which
 	 * implicitly enables cross-thread protections. However, in legacy IBRS
 	 * mode, the IBRS bit is set only on kernel entry and cleared on return
-	 * to userspace. This disables the implicit cross-thread protection,
-	 * so allow for STIBP to be selected in that case.
+	 * to userspace. AMD Automatic IBRS also does not protect userspace.
+	 * These modes therefore disable the implicit cross-thread protection,
+	 * so allow for STIBP to be selected in those cases.
 	 */
 	if (!boot_cpu_has(X86_FEATURE_STIBP) ||
 	    !smt_possible ||
-	    spectre_v2_in_eibrs_mode(spectre_v2_enabled))
+	    (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
+	     !boot_cpu_has(X86_FEATURE_AUTOIBRS)))
 		return;
 
 	/*
@@ -1293,9 +1361,9 @@ static const char * const spectre_v2_strings[] = {
 	[SPECTRE_V2_NONE]			= "Vulnerable",
 	[SPECTRE_V2_RETPOLINE]			= "Mitigation: Retpolines",
 	[SPECTRE_V2_LFENCE]			= "Mitigation: LFENCE",
-	[SPECTRE_V2_EIBRS]			= "Mitigation: Enhanced IBRS",
-	[SPECTRE_V2_EIBRS_LFENCE]		= "Mitigation: Enhanced IBRS + LFENCE",
-	[SPECTRE_V2_EIBRS_RETPOLINE]		= "Mitigation: Enhanced IBRS + Retpolines",
+	[SPECTRE_V2_EIBRS]			= "Mitigation: Enhanced / Automatic IBRS",
+	[SPECTRE_V2_EIBRS_LFENCE]		= "Mitigation: Enhanced / Automatic IBRS + LFENCE",
+	[SPECTRE_V2_EIBRS_RETPOLINE]		= "Mitigation: Enhanced / Automatic IBRS + Retpolines",
 	[SPECTRE_V2_IBRS]			= "Mitigation: IBRS",
 };
 
@@ -1364,7 +1432,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
 	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
 	    !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
-		pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
+		pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n",
 		       mitigation_options[i].option);
 		return SPECTRE_V2_CMD_AUTO;
 	}
@@ -1549,8 +1617,12 @@ static void __init spectre_v2_select_mitigation(void)
 		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
 
 	if (spectre_v2_in_ibrs_mode(mode)) {
-		x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
-		update_spec_ctrl(x86_spec_ctrl_base);
+		if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
+			msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
+		} else {
+			x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
+			update_spec_ctrl(x86_spec_ctrl_base);
+		}
 	}
 
 	switch (mode) {
@@ -1634,8 +1706,8 @@ static void __init spectre_v2_select_mitigation(void)
 	/*
 	 * Retpoline protects the kernel, but doesn't protect firmware. IBRS
 	 * and Enhanced IBRS protect firmware too, so enable IBRS around
-	 * firmware calls only when IBRS / Enhanced IBRS aren't otherwise
-	 * enabled.
+	 * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
+	 * otherwise enabled.
 	 *
 	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
 	 * the user might select retpoline on the kernel command line and if
@@ -2432,74 +2504,74 @@ static const char * const l1tf_vmx_states[] = {
 static ssize_t l1tf_show_state(char *buf)
 {
 	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
-		return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
+		return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
 
 	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
 	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
 	     sched_smt_active())) {
-		return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
-			       l1tf_vmx_states[l1tf_vmx_mitigation]);
+		return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
+				  l1tf_vmx_states[l1tf_vmx_mitigation]);
 	}
 
-	return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
-		       l1tf_vmx_states[l1tf_vmx_mitigation],
-		       sched_smt_active() ? "vulnerable" : "disabled");
+	return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
+			  l1tf_vmx_states[l1tf_vmx_mitigation],
+			  sched_smt_active() ? "vulnerable" : "disabled");
 }
 
 static ssize_t itlb_multihit_show_state(char *buf)
 {
 	if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
 	    !boot_cpu_has(X86_FEATURE_VMX))
-		return sprintf(buf, "KVM: Mitigation: VMX unsupported\n");
+		return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n");
 	else if (!(cr4_read_shadow() & X86_CR4_VMXE))
-		return sprintf(buf, "KVM: Mitigation: VMX disabled\n");
+		return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n");
 	else if (itlb_multihit_kvm_mitigation)
-		return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
+		return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n");
 	else
-		return sprintf(buf, "KVM: Vulnerable\n");
+		return sysfs_emit(buf, "KVM: Vulnerable\n");
 }
 #else
 static ssize_t l1tf_show_state(char *buf)
 {
-	return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
+	return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
 }
 
 static ssize_t itlb_multihit_show_state(char *buf)
 {
-	return sprintf(buf, "Processor vulnerable\n");
+	return sysfs_emit(buf, "Processor vulnerable\n");
 }
 #endif
 
 static ssize_t mds_show_state(char *buf)
 {
 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
-		return sprintf(buf, "%s; SMT Host state unknown\n",
-			       mds_strings[mds_mitigation]);
+		return sysfs_emit(buf, "%s; SMT Host state unknown\n",
+				  mds_strings[mds_mitigation]);
 	}
 
 	if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
-		return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
-			       (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
-			        sched_smt_active() ? "mitigated" : "disabled"));
+		return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
+				  (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
+				   sched_smt_active() ? "mitigated" : "disabled"));
 	}
 
-	return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
-		       sched_smt_active() ? "vulnerable" : "disabled");
+	return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
+			  sched_smt_active() ? "vulnerable" : "disabled");
 }
 
 static ssize_t tsx_async_abort_show_state(char *buf)
 {
 	if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
 	    (taa_mitigation == TAA_MITIGATION_OFF))
-		return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);
+		return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]);
 
 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
-		return sprintf(buf, "%s; SMT Host state unknown\n",
-			       taa_strings[taa_mitigation]);
+		return sysfs_emit(buf, "%s; SMT Host state unknown\n",
+				  taa_strings[taa_mitigation]);
 	}
 
-	return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
-		       sched_smt_active() ? "vulnerable" : "disabled");
+	return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
+			  sched_smt_active() ? "vulnerable" : "disabled");
 }
 
 static ssize_t mmio_stale_data_show_state(char *buf)
@@ -2519,9 +2591,15 @@ static ssize_t mmio_stale_data_show_state(char *buf)
 			  sched_smt_active() ? "vulnerable" : "disabled");
 }
 
+static ssize_t rfds_show_state(char *buf)
+{
+	return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
+}
+
 static char *stibp_state(void)
 {
-	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
+	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
+	    !boot_cpu_has(X86_FEATURE_AUTOIBRS))
 		return "";
 
 	switch (spectre_v2_user_stibp) {
@@ -2567,47 +2645,46 @@ static char *pbrsb_eibrs_state(void)
 static ssize_t spectre_v2_show_state(char *buf)
 {
 	if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
-		return sprintf(buf, "Vulnerable: LFENCE\n");
+		return sysfs_emit(buf, "Vulnerable: LFENCE\n");
 
 	if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
-		return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
+		return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
 
 	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
 	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
-		return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
+		return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
 
-	return sprintf(buf, "%s%s%s%s%s%s%s\n",
-		       spectre_v2_strings[spectre_v2_enabled],
-		       ibpb_state(),
-		       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
-		       stibp_state(),
-		       boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
-		       pbrsb_eibrs_state(),
-		       spectre_v2_module_string());
+	return sysfs_emit(buf, "%s%s%s%s%s%s%s\n",
+			  spectre_v2_strings[spectre_v2_enabled],
+			  ibpb_state(),
+			  boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+			  stibp_state(),
+			  boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
+			  pbrsb_eibrs_state(),
+			  spectre_v2_module_string());
 }
 
 static ssize_t srbds_show_state(char *buf)
 {
-	return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
+	return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]);
 }
 
 static ssize_t retbleed_show_state(char *buf)
 {
 	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
 	    retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
-	    if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
-		boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
-		    return sprintf(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");
+		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+			return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");
 
-	    return sprintf(buf, "%s; SMT %s\n",
-			   retbleed_strings[retbleed_mitigation],
-			   !sched_smt_active() ? "disabled" :
-			   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
-			   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
-			   "enabled with STIBP protection" : "vulnerable");
+		return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation],
+				  !sched_smt_active() ? "disabled" :
+				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
+				  "enabled with STIBP protection" : "vulnerable");
 	}
 
-	return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
+	return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
 }
 
 static ssize_t gds_show_state(char *buf)
@@ -2629,26 +2706,26 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
 			       char *buf, unsigned int bug)
 {
 	if (!boot_cpu_has_bug(bug))
-		return sprintf(buf, "Not affected\n");
+		return sysfs_emit(buf, "Not affected\n");
 
 	switch (bug) {
 	case X86_BUG_CPU_MELTDOWN:
 		if (boot_cpu_has(X86_FEATURE_PTI))
-			return sprintf(buf, "Mitigation: PTI\n");
+			return sysfs_emit(buf, "Mitigation: PTI\n");
 
 		if (hypervisor_is_type(X86_HYPER_XEN_PV))
-			return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
+			return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
 
 		break;
 
 	case X86_BUG_SPECTRE_V1:
-		return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
+		return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
 
 	case X86_BUG_SPECTRE_V2:
 		return spectre_v2_show_state(buf);
 
 	case X86_BUG_SPEC_STORE_BYPASS:
-		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
+		return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]);
 
 	case X86_BUG_L1TF:
 		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
@@ -2680,11 +2757,14 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
 	case X86_BUG_SRSO:
 		return srso_show_state(buf);
 
+	case X86_BUG_RFDS:
+		return rfds_show_state(buf);
+
 	default:
 		break;
 	}
 
-	return sprintf(buf, "Vulnerable\n");
+	return sysfs_emit(buf, "Vulnerable\n");
 }
 
 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
@@ -2754,4 +2834,9 @@ ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribut
 {
 	return cpu_show_common(dev, attr, buf, X86_BUG_SRSO);
 }
+
+ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
+}
 #endif