Merge branch 'linus' into x86/urgent, to pick up dependent changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>

@@ -344,6 +344,7 @@
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
@@ -382,5 +383,7 @@
#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
#define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
#define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSBDS variant of BUG_MDS */

#endif /* _ASM_X86_CPUFEATURES_H */

@@ -13,14 +13,7 @@
#include <asm/swiotlb.h>
#include <linux/dma-contiguous.h>

#ifdef CONFIG_ISA
# define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
#else
# define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
#endif

extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

extern const struct dma_map_ops *dma_ops;

@@ -30,7 +23,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
        return dma_ops;
}

bool arch_dma_alloc_attrs(struct device **dev);
#define arch_dma_alloc_attrs arch_dma_alloc_attrs

#endif

@@ -10,6 +10,7 @@

#ifndef _ASM_X86_FPU_API_H
#define _ASM_X86_FPU_API_H
#include <linux/bottom_half.h>

/*
 * Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
@@ -21,6 +22,36 @@
extern void kernel_fpu_begin(void);
extern void kernel_fpu_end(void);
extern bool irq_fpu_usable(void);
extern void fpregs_mark_activate(void);

/*
 * Use fpregs_lock() while editing CPU's FPU registers or fpu->state.
 * A context switch will (and softirq might) save CPU's FPU registers to
 * fpu->state and set TIF_NEED_FPU_LOAD leaving CPU's FPU registers in
 * a random state.
 */
static inline void fpregs_lock(void)
{
        preempt_disable();
        local_bh_disable();
}

static inline void fpregs_unlock(void)
{
        local_bh_enable();
        preempt_enable();
}

#ifdef CONFIG_X86_DEBUG_FPU
extern void fpregs_assert_state_consistent(void);
#else
static inline void fpregs_assert_state_consistent(void) { }
#endif

/*
 * Load the task FPU state before returning to userspace.
 */
extern void switch_fpu_return(void);

/*
 * Query the presence of one or more xfeatures. Works on any legacy CPU as well.
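
For orientation, a minimal usage sketch (not part of this commit) of the
fpregs_lock()/fpregs_unlock() pair declared above; the body of the function
is a placeholder:

/* Illustration only: edits of the in-memory FPU state must be bracketed
 * so that neither a context switch nor a softirq can save or reload the
 * registers underneath the writer. */
static void update_task_xstate(struct fpu *fpu)
{
        fpregs_lock();                  /* preemption and softirqs off */
        /* ... safely edit fpu->state here (placeholder) ... */
        fpregs_unlock();
}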

@@ -14,6 +14,7 @@
#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/user.h>
#include <asm/fpu/api.h>
@@ -24,14 +25,12 @@
/*
 * High level FPU state handling functions:
 */
extern void fpu__initialize(struct fpu *fpu);
extern void fpu__prepare_read(struct fpu *fpu);
extern void fpu__prepare_write(struct fpu *fpu);
extern void fpu__save(struct fpu *fpu);
extern void fpu__restore(struct fpu *fpu);
extern int fpu__restore_sig(void __user *buf, int ia32_frame);
extern void fpu__drop(struct fpu *fpu);
extern int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
extern int fpu__copy(struct task_struct *dst, struct task_struct *src);
extern void fpu__clear(struct fpu *fpu);
extern int fpu__exception_code(struct fpu *fpu, int trap_nr);
extern int dump_fpu(struct pt_regs *ptregs, struct user_i387_struct *fpstate);
@@ -122,6 +121,21 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
        err;                                                    \
})

#define kernel_insn_err(insn, output, input...)                 \
({                                                              \
        int err;                                                \
        asm volatile("1:" #insn "\n\t"                          \
                     "2:\n"                                     \
                     ".section .fixup,\"ax\"\n"                 \
                     "3: movl $-1,%[err]\n"                     \
                     " jmp 2b\n"                                \
                     ".previous\n"                              \
                     _ASM_EXTABLE(1b, 3b)                       \
                     : [err] "=r" (err), output                 \
                     : "0"(0), input);                          \
        err;                                                    \
})

#define kernel_insn(insn, output, input...)                     \
        asm volatile("1:" #insn "\n\t"                          \
                     "2:\n"                                     \
@@ -150,6 +164,14 @@ static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
        kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_kernel_to_fxregs_err(struct fxregs_state *fx)
{
        if (IS_ENABLED(CONFIG_X86_32))
                return kernel_insn_err(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
        else
                return kernel_insn_err(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
{
        if (IS_ENABLED(CONFIG_X86_32))
@@ -163,6 +185,11 @@ static inline void copy_kernel_to_fregs(struct fregs_state *fx)
        kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_kernel_to_fregs_err(struct fregs_state *fx)
{
        return kernel_insn_err(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_user_to_fregs(struct fregs_state __user *fx)
{
        return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
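
For orientation, a hedged sketch (not part of this commit) of how a caller
might use the new *_err variants: instead of relying on exception fixups it
can test the return value; the fallback shown is illustrative only:

static void restore_or_reinit(struct fpu *fpu)
{
        /* Returns non-zero instead of faulting on a bad fxsave image. */
        if (copy_kernel_to_fxregs_err(&fpu->state.fxsave))
                copy_kernel_to_fpregs(&init_fpstate);   /* illustrative fallback */
}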
@@ -362,6 +389,21 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
        return err;
}

/*
 * Restore xstate from kernel space xsave area, return an error code instead of
 * an exception.
 */
static inline int copy_kernel_to_xregs_err(struct xregs_state *xstate, u64 mask)
{
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;

        XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

        return err;
}

/*
 * These must be called with preempt disabled. Returns
 * 'true' if the FPU state is still intact and we can
@@ -486,6 +528,25 @@ static inline void fpregs_activate(struct fpu *fpu)
        trace_x86_fpu_regs_activated(fpu);
}

/*
 * Internal helper, do not use directly. Use switch_fpu_return() instead.
 */
static inline void __fpregs_load_activate(void)
{
        struct fpu *fpu = &current->thread.fpu;
        int cpu = smp_processor_id();

        if (WARN_ON_ONCE(current->mm == NULL))
                return;

        if (!fpregs_state_valid(fpu, cpu)) {
                copy_kernel_to_fpregs(&fpu->state);
                fpregs_activate(fpu);
                fpu->last_cpu = cpu;
        }
        clear_thread_flag(TIF_NEED_FPU_LOAD);
}

/*
 * FPU state switching for scheduling.
 *
@@ -494,12 +555,23 @@ static inline void fpregs_activate(struct fpu *fpu)
 * - switch_fpu_prepare() saves the old state.
 *   This is done within the context of the old process.
 *
 * - switch_fpu_finish() restores the new state as
 *   necessary.
 * - switch_fpu_finish() sets TIF_NEED_FPU_LOAD; the floating point state
 *   will get loaded on return to userspace, or when the kernel needs it.
 *
 * If TIF_NEED_FPU_LOAD is cleared then the CPU's FPU registers
 * are saved in the current thread's FPU register state.
 *
 * If TIF_NEED_FPU_LOAD is set then CPU's FPU registers may not
 * hold current()'s FPU registers. It is required to load the
 * registers before returning to userland or using the content
 * otherwise.
 *
 * The FPU context is only stored/restored for a user task and
 * ->mm is used to distinguish between kernel and user threads.
 */
static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
        if (static_cpu_has(X86_FEATURE_FPU) && old_fpu->initialized) {
        if (static_cpu_has(X86_FEATURE_FPU) && current->mm) {
                if (!copy_fpregs_to_fpstate(old_fpu))
                        old_fpu->last_cpu = -1;
                else
@@ -507,8 +579,7 @@ static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)

                /* But leave fpu_fpregs_owner_ctx! */
                trace_x86_fpu_regs_deactivated(old_fpu);
        } else
                old_fpu->last_cpu = -1;
        }
}

/*
@@ -516,36 +587,32 @@ static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 */

/*
 * Set up the userspace FPU context for the new task, if the task
 * has used the FPU.
 * Load PKRU from the FPU context if available. Delay loading of the
 * complete FPU state until the return to userland.
 */
static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
static inline void switch_fpu_finish(struct fpu *new_fpu)
{
        bool preload = static_cpu_has(X86_FEATURE_FPU) &&
                       new_fpu->initialized;
        u32 pkru_val = init_pkru_value;
        struct pkru_state *pk;

        if (preload) {
                if (!fpregs_state_valid(new_fpu, cpu))
                        copy_kernel_to_fpregs(&new_fpu->state);
                fpregs_activate(new_fpu);
        if (!static_cpu_has(X86_FEATURE_FPU))
                return;

        set_thread_flag(TIF_NEED_FPU_LOAD);

        if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
                return;

        /*
         * PKRU state is switched eagerly because it needs to be valid before we
         * return to userland e.g. for a copy_to_user() operation.
         */
        if (current->mm) {
                pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
                if (pk)
                        pkru_val = pk->pkru;
        }

/*
 * Needs to be preemption-safe.
 *
 * NOTE! user_fpu_begin() must be used only immediately before restoring
 * the save state. It does not do any saving/restoring on its own. In
 * lazy FPU mode, it is just an optimization to avoid a #NM exception,
 * the task can lose the FPU right after preempt_enable().
 */
static inline void user_fpu_begin(void)
{
        struct fpu *fpu = &current->thread.fpu;

        preempt_disable();
        fpregs_activate(fpu);
        preempt_enable();
        __write_pkru(pkru_val);
}

/*
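
To summarize the deferred-restore scheme above outside of kernel context,
here is a self-contained toy model (plain C, illustration only): the
context switch merely sets a flag, and the register load happens once on
the way back to userspace:

#include <stdbool.h>
#include <stdio.h>

static bool need_fpu_load;              /* stands in for TIF_NEED_FPU_LOAD */

static void model_switch_fpu_finish(void)
{
        need_fpu_load = true;           /* context switch: only set the flag */
}

static void model_switch_fpu_return(void)
{
        if (need_fpu_load) {            /* load deferred until really needed */
                puts("loading FPU registers");
                need_fpu_load = false;
        }
}

int main(void)
{
        model_switch_fpu_finish();      /* schedule in */
        model_switch_fpu_finish();      /* run again without touching FPU */
        model_switch_fpu_return();      /* exit to userspace: one load */
        return 0;
}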

@@ -22,7 +22,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,

extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
                              struct task_struct *tsk);
extern void convert_to_fxsr(struct task_struct *tsk,
extern void convert_to_fxsr(struct fxregs_state *fxsave,
                            const struct user_i387_ia32_struct *env);

unsigned long

@@ -293,15 +293,6 @@ struct fpu {
         */
        unsigned int last_cpu;

        /*
         * @initialized:
         *
         * This flag indicates whether this context is initialized: if the task
         * is not running then we can restore from this context, if the task
         * is running then we should save into this context.
         */
        unsigned char initialized;

        /*
         * @avx512_timestamp:
         *

@@ -2,9 +2,11 @@
#ifndef __ASM_X86_XSAVE_H
#define __ASM_X86_XSAVE_H

#include <linux/types.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <linux/types.h>

#include <asm/processor.h>
#include <asm/user.h>

/* Bit 63 of XCR0 is reserved for future expansion */
#define XFEATURE_MASK_EXTEND (~(XFEATURE_MASK_FPSSE | (1ULL << 63)))
@@ -46,8 +48,8 @@ extern void __init update_regset_xstate_info(unsigned int size,
                                             u64 xstate_mask);

void fpu__xstate_clear_all_cpu_caps(void);
void *get_xsave_addr(struct xregs_state *xsave, int xstate);
const void *get_xsave_field_ptr(int xstate_field);
void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr);
const void *get_xsave_field_ptr(int xfeature_nr);
int using_compacted_format(void);
int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);

@@ -17,8 +17,4 @@ static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static inline bool gigantic_page_supported(void) { return true; }
#endif

#endif /* _ASM_X86_HUGETLB_H */

@@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* SPDX-License-Identifier: GPL-2.0 */

/*
 * This file contains definitions from Hyper-V Hypervisor Top-Level Functional

@@ -6,6 +6,8 @@

#ifndef __ASSEMBLY__

#include <asm/nospec-branch.h>

/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
#define __cpuidle __attribute__((__section__(".cpuidle.text")))

@@ -54,11 +56,13 @@ static inline void native_irq_enable(void)

static inline __cpuidle void native_safe_halt(void)
{
        mds_idle_clear_cpu_buffers();
        asm volatile("sti; hlt": : :"memory");
}

static inline __cpuidle void native_halt(void)
{
        mds_idle_clear_cpu_buffers();
        asm volatile("hlt": : :"memory");
}

@@ -2,6 +2,8 @@
#ifndef _ASM_X86_MSR_INDEX_H
#define _ASM_X86_MSR_INDEX_H

#include <linux/bits.h>

/*
 * CPU model specific register (MSR) numbers.
 *
@@ -40,14 +42,14 @@
/* Intel MSRs. Some also available on other CPUs */

#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
#define SPEC_CTRL_IBRS BIT(0) /* Indirect Branch Restricted Speculation */
#define SPEC_CTRL_STIBP_SHIFT 1 /* Single Thread Indirect Branch Predictor (STIBP) bit */
#define SPEC_CTRL_STIBP (1 << SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */
#define SPEC_CTRL_STIBP BIT(SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */
#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
#define SPEC_CTRL_SSBD BIT(SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */

#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
#define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */

#define MSR_PPIN_CTL 0x0000004e
#define MSR_PPIN 0x0000004f
@@ -69,20 +71,25 @@
#define MSR_MTRRcap 0x000000fe

#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH (1 << 3) /* Skip L1D flush on vmentry */
#define ARCH_CAP_SSB_NO (1 << 4) /*
                                  * Not susceptible to Speculative Store Bypass
                                  * attack, so no Speculative Store Bypass
                                  * control required.
                                  */
#define ARCH_CAP_RDCL_NO BIT(0) /* Not susceptible to Meltdown */
#define ARCH_CAP_IBRS_ALL BIT(1) /* Enhanced IBRS support */
#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH BIT(3) /* Skip L1D flush on vmentry */
#define ARCH_CAP_SSB_NO BIT(4) /*
                                * Not susceptible to Speculative Store Bypass
                                * attack, so no Speculative Store Bypass
                                * control required.
                                */
#define ARCH_CAP_MDS_NO BIT(5) /*
                                * Not susceptible to
                                * Microarchitectural Data
                                * Sampling (MDS) vulnerabilities.
                                */

#define MSR_IA32_FLUSH_CMD 0x0000010b
#define L1D_FLUSH (1 << 0) /*
                            * Writeback and invalidate the
                            * L1 data cache.
                            */
#define L1D_FLUSH BIT(0) /*
                          * Writeback and invalidate the
                          * L1 data cache.
                          */

#define MSR_IA32_BBL_CR_CTL 0x00000119
#define MSR_IA32_BBL_CR_CTL3 0x0000011e
|
||||
#include <linux/sched/idle.h>
|
||||
|
||||
#include <asm/cpufeature.h>
|
||||
#include <asm/nospec-branch.h>
|
||||
|
||||
#define MWAIT_SUBSTATE_MASK 0xf
|
||||
#define MWAIT_CSTATE_MASK 0xf
|
||||
@@ -40,6 +41,8 @@ static inline void __monitorx(const void *eax, unsigned long ecx,
|
||||
|
||||
static inline void __mwait(unsigned long eax, unsigned long ecx)
|
||||
{
|
||||
mds_idle_clear_cpu_buffers();
|
||||
|
||||
/* "mwait %eax, %ecx;" */
|
||||
asm volatile(".byte 0x0f, 0x01, 0xc9;"
|
||||
:: "a" (eax), "c" (ecx));
|
||||
@@ -74,6 +77,8 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
|
||||
static inline void __mwaitx(unsigned long eax, unsigned long ebx,
|
||||
unsigned long ecx)
|
||||
{
|
||||
/* No MDS buffer clear as this is AMD/HYGON only */
|
||||
|
||||
/* "mwaitx %eax, %ebx, %ecx;" */
|
||||
asm volatile(".byte 0x0f, 0x01, 0xfb;"
|
||||
:: "a" (eax), "b" (ebx), "c" (ecx));
|
||||
@@ -81,6 +86,8 @@ static inline void __mwaitx(unsigned long eax, unsigned long ebx,
|
||||
|
||||
static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
|
||||
{
|
||||
mds_idle_clear_cpu_buffers();
|
||||
|
||||
trace_hardirqs_on();
|
||||
/* "mwait %eax, %ecx;" */
|
||||
asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
|
||||
|
@@ -308,6 +308,56 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
|
||||
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
|
||||
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
|
||||
|
||||
DECLARE_STATIC_KEY_FALSE(mds_user_clear);
|
||||
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
|
||||
|
||||
#include <asm/segment.h>
|
||||
|
||||
/**
|
||||
* mds_clear_cpu_buffers - Mitigation for MDS vulnerability
|
||||
*
|
||||
* This uses the otherwise unused and obsolete VERW instruction in
|
||||
* combination with microcode which triggers a CPU buffer flush when the
|
||||
* instruction is executed.
|
||||
*/
|
||||
static inline void mds_clear_cpu_buffers(void)
|
||||
{
|
||||
static const u16 ds = __KERNEL_DS;
|
||||
|
||||
/*
|
||||
* Has to be the memory-operand variant because only that
|
||||
* guarantees the CPU buffer flush functionality according to
|
||||
* documentation. The register-operand variant does not.
|
||||
* Works with any segment selector, but a valid writable
|
||||
* data segment is the fastest variant.
|
||||
*
|
||||
* "cc" clobber is required because VERW modifies ZF.
|
||||
*/
|
||||
asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
|
||||
}
|
||||
|
||||
/**
|
||||
* mds_user_clear_cpu_buffers - Mitigation for MDS vulnerability
|
||||
*
|
||||
* Clear CPU buffers if the corresponding static key is enabled
|
||||
*/
|
||||
static inline void mds_user_clear_cpu_buffers(void)
|
||||
{
|
||||
if (static_branch_likely(&mds_user_clear))
|
||||
mds_clear_cpu_buffers();
|
||||
}
|
||||
|
||||
/**
|
||||
* mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
|
||||
*
|
||||
* Clear CPU buffers if the corresponding static key is enabled
|
||||
*/
|
||||
static inline void mds_idle_clear_cpu_buffers(void)
|
||||
{
|
||||
if (static_branch_likely(&mds_idle_clear))
|
||||
mds_clear_cpu_buffers();
|
||||
}
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
/*
|
||||
|
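
Hedged sketch (not from this commit): the two static keys are meant to be
flipped once by the mitigation-selection code and otherwise remain
patched-out NOPs, roughly:

if (mds_mitigation != MDS_MITIGATION_OFF) {
        static_branch_enable(&mds_user_clear);  /* flush on return to user */
        static_branch_enable(&mds_idle_clear);  /* flush before idling */
}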

@@ -23,6 +23,8 @@

#ifndef __ASSEMBLY__
#include <asm/x86_init.h>
#include <asm/fpu/xstate.h>
#include <asm/fpu/api.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
@@ -127,14 +129,29 @@ static inline int pte_dirty(pte_t pte)
static inline u32 read_pkru(void)
{
        if (boot_cpu_has(X86_FEATURE_OSPKE))
                return __read_pkru();
                return rdpkru();
        return 0;
}

static inline void write_pkru(u32 pkru)
{
        if (boot_cpu_has(X86_FEATURE_OSPKE))
                __write_pkru(pkru);
        struct pkru_state *pk;

        if (!boot_cpu_has(X86_FEATURE_OSPKE))
                return;

        pk = get_xsave_addr(&current->thread.fpu.state.xsave, XFEATURE_PKRU);

        /*
         * The PKRU value in xstate needs to be in sync with the value that is
         * written to the CPU. The FPU restore on return to userland would
         * otherwise load the previous value again.
         */
        fpregs_lock();
        if (pk)
                pk->pkru = pkru;
        __write_pkru(pkru);
        fpregs_unlock();
}

static inline int pte_young(pte_t pte)
@@ -1358,6 +1375,12 @@ static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
#define PKRU_WD_BIT 0x2
#define PKRU_BITS_PER_PKEY 2

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
extern u32 init_pkru_value;
#else
#define init_pkru_value 0
#endif

static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
        int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
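
For reference, a sketch mirroring the existing __pkru_allows_read() and
__pkru_allows_write() helpers: each protection key owns two PKRU bits,
access-disable (PKRU_AD_BIT, 0x1) and write-disable (PKRU_WD_BIT, 0x2):

static inline bool pkru_allows_write(u32 pkru, u16 pkey)
{
        int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;

        /* Writes need both the AD and the WD bit to be clear. */
        return !(pkru & ((PKRU_AD_BIT | PKRU_WD_BIT) << pkru_pkey_bits));
}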

@@ -978,4 +978,10 @@ enum l1tf_mitigations {

extern enum l1tf_mitigations l1tf_mitigation;

enum mds_mitigations {
        MDS_MITIGATION_OFF,
        MDS_MITIGATION_FULL,
        MDS_MITIGATION_VMWERV,
};

#endif /* _ASM_X86_PROCESSOR_H */

@@ -92,7 +92,7 @@ static inline void native_write_cr8(unsigned long val)
#endif

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline u32 __read_pkru(void)
static inline u32 rdpkru(void)
{
        u32 ecx = 0;
        u32 edx, pkru;
@@ -107,7 +107,7 @@ static inline u32 __read_pkru(void)
        return pkru;
}

static inline void __write_pkru(u32 pkru)
static inline void wrpkru(u32 pkru)
{
        u32 ecx = 0, edx = 0;

@@ -118,8 +118,21 @@ static inline void __write_pkru(u32 pkru)
        asm volatile(".byte 0x0f,0x01,0xef\n\t"
                     : : "a" (pkru), "c"(ecx), "d"(edx));
}

static inline void __write_pkru(u32 pkru)
{
        /*
         * WRPKRU is relatively expensive compared to RDPKRU.
         * Avoid WRPKRU when it would not change the value.
         */
        if (pkru == rdpkru())
                return;

        wrpkru(pkru);
}

#else
static inline u32 __read_pkru(void)
static inline u32 rdpkru(void)
{
        return 0;
}
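
A brief illustration (not part of the diff) of what the __write_pkru()
guard buys: since PKRU is now written eagerly on context switch, the common
no-change case degrades to a cheap RDPKRU:

__write_pkru(pkru_val);         /* value changed: executes WRPKRU */
__write_pkru(pkru_val);         /* unchanged: returns after rdpkru() */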

@@ -105,7 +105,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
        memcpy(&regs->bx + i, args, n * sizeof(args[0]));
}

static inline int syscall_get_arch(void)
static inline int syscall_get_arch(struct task_struct *task)
{
        return AUDIT_ARCH_I386;
}
@@ -160,10 +160,12 @@ static inline void syscall_set_arguments(struct task_struct *task,
        }
}

static inline int syscall_get_arch(void)
static inline int syscall_get_arch(struct task_struct *task)
{
        /* x32 tasks should be considered AUDIT_ARCH_X86_64. */
        return in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
        return (IS_ENABLED(CONFIG_IA32_EMULATION) &&
                task->thread_info.status & TS_COMPAT)
                        ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
}
#endif /* CONFIG_X86_32 */

@@ -88,6 +88,7 @@ struct thread_info {
#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
#define TIF_UPROBE 12 /* breakpointed or singlestepping */
#define TIF_PATCH_PENDING 13 /* pending live patching update */
#define TIF_NEED_FPU_LOAD 14 /* load FPU on return to userspace */
#define TIF_NOCPUID 15 /* CPUID is not accessible in userland */
#define TIF_NOTSC 16 /* TSC is not accessible in userland */
#define TIF_IA32 17 /* IA32 compatibility process */
@@ -117,6 +118,7 @@ struct thread_info {
#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING)
#define _TIF_NEED_FPU_LOAD (1 << TIF_NEED_FPU_LOAD)
#define _TIF_NOCPUID (1 << TIF_NOCPUID)
#define _TIF_NOTSC (1 << TIF_NOTSC)
#define _TIF_IA32 (1 << TIF_IA32)

@@ -30,7 +30,7 @@ DECLARE_EVENT_CLASS(x86_exceptions,
        __entry->error_code = error_code;
        ),

        TP_printk("address=%pf ip=%pf error_code=0x%lx",
        TP_printk("address=%ps ip=%ps error_code=0x%lx",
                  (void *)__entry->address, (void *)__entry->ip,
                  __entry->error_code) );

@@ -13,22 +13,22 @@ DECLARE_EVENT_CLASS(x86_fpu,

        TP_STRUCT__entry(
                __field(struct fpu *, fpu)
                __field(bool, initialized)
                __field(bool, load_fpu)
                __field(u64, xfeatures)
                __field(u64, xcomp_bv)
        ),

        TP_fast_assign(
                __entry->fpu = fpu;
                __entry->initialized = fpu->initialized;
                __entry->load_fpu = test_thread_flag(TIF_NEED_FPU_LOAD);
                if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
                        __entry->xfeatures = fpu->state.xsave.header.xfeatures;
                        __entry->xcomp_bv = fpu->state.xsave.header.xcomp_bv;
                }
        ),
        TP_printk("x86/fpu: %p initialized: %d xfeatures: %llx xcomp_bv: %llx",
        TP_printk("x86/fpu: %p load: %d xfeatures: %llx xcomp_bv: %llx",
                  __entry->fpu,
                  __entry->initialized,
                  __entry->load_fpu,
                  __entry->xfeatures,
                  __entry->xcomp_bv
        )
@@ -64,11 +64,6 @@ DEFINE_EVENT(x86_fpu, x86_fpu_regs_deactivated,
        TP_ARGS(fpu)
);

DEFINE_EVENT(x86_fpu, x86_fpu_activate_state,
        TP_PROTO(struct fpu *fpu),
        TP_ARGS(fpu)
);

DEFINE_EVENT(x86_fpu, x86_fpu_init_state,
        TP_PROTO(struct fpu *fpu),
        TP_ARGS(fpu)