
AMD is proposing a VIRT_SPEC_CTRL MSR to handle the Speculative Store
Bypass Disable via MSR_AMD64_LS_CFG so that guests do not have to care
about the bit position of the SSBD bit and thus facilitate migration.
Also, the sibling coordination on Family 17H CPUs can only be done on
the host.

Extend x86_spec_ctrl_set_guest() and x86_spec_ctrl_restore_host() with an
extra argument for the VIRT_SPEC_CTRL MSR.

Hand in 0 from VMX and in SVM add a new virt_spec_ctrl member to the CPU
data structure which is going to be used in later patches for the actual
implementation.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
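A minimal sketch of the intended call pattern around guest entry/exit, assuming the per-vCPU shadow values described above (svm->spec_ctrl, the new svm->virt_spec_ctrl member, and vmx->spec_ctrl; the actual call sites are wired up by later patches in the series):

	/* SVM: pass the guest's emulated VIRT_SPEC_CTRL alongside SPEC_CTRL */
	x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
	/* ... enter the guest (VMRUN) ... */
	x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);

	/* VMX: no VIRT_SPEC_CTRL emulation, so 0 is handed in */
	x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
	/* ... enter the guest (VMLAUNCH/VMRESUME) ... */
	x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);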
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SPECCTRL_H_
#define _ASM_X86_SPECCTRL_H_

#include <linux/thread_info.h>
#include <asm/nospec-branch.h>

/*
 * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
 * the guest has, while on VMEXIT we restore the host view. This
 * would be easier if SPEC_CTRL were architecturally maskable or
 * shadowable for guests but this is not (currently) the case.
 * Takes the guest view of SPEC_CTRL MSR as a parameter and also
 * the guest's version of VIRT_SPEC_CTRL, if emulated.
 */
extern void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl,
				    u64 guest_virt_spec_ctrl);
extern void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl,
				       u64 guest_virt_spec_ctrl);

/* AMD specific Speculative Store Bypass MSR data */
extern u64 x86_amd_ls_cfg_base;
extern u64 x86_amd_ls_cfg_ssbd_mask;

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;

static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
{
	BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
	return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}

static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
{
	return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
}

#ifdef CONFIG_SMP
extern void speculative_store_bypass_ht_init(void);
#else
static inline void speculative_store_bypass_ht_init(void) { }
#endif

extern void speculative_store_bypass_update(void);

#endif
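For reference, a simplified sketch (not the exact in-tree code) of how the ssbd_tif_to_*() helpers above are meant to be consumed when a task's TIF_SSBD flag changes, e.g. via speculative_store_bypass_update() or the context-switch path; the feature check and MSR choice follow the usual SPEC_CTRL vs. AMD LS_CFG split:

	static void __update_ssbd(unsigned long tifn)
	{
		if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
			/* SPEC_CTRL MSR available: merge the per-task SSBD
			 * bit into the cached base value and write it back. */
			u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);

			wrmsrl(MSR_IA32_SPEC_CTRL, msr);
		} else {
			/* AMD LS_CFG based SSBD: set or clear the SSBD mask
			 * bit on top of the boot-time LS_CFG base value. */
			u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);

			wrmsrl(MSR_AMD64_LS_CFG, msr);
		}
	}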