Merge tag 'x86-entry-2020-08-04' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 conversion to generic entry code from Thomas Gleixner:
 "The conversion of X86 syscall, interrupt and exception entry/exit
  handling to the generic code.

  Pretty much a straight-forward 1:1 conversion plus the consolidation
  of the KVM handling of pending work before entering guest mode"

* tag 'x86-entry-2020-08-04' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/kvm: Use __xfer_to_guest_mode_work_pending() in kvm_run_vcpu()
  x86/kvm: Use generic xfer to guest work function
  x86/entry: Cleanup idtentry_enter/exit
  x86/entry: Use generic interrupt entry/exit code
  x86/entry: Cleanup idtentry_entry/exit_user
  x86/entry: Use generic syscall exit functionality
  x86/entry: Use generic syscall entry function
  x86/ptrace: Provide pt_regs helper for entry/exit
  x86/entry: Move user return notifier out of loop
  x86/entry: Consolidate 32/64 bit syscall entry
  x86/entry: Consolidate check_user_regs()
  x86: Correct noinstr qualifiers
  x86/idtentry: Remove stale comment
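For context on the KVM consolidation: before this series, x86 KVM open-coded its check for pending work (signals, reschedule requests, ...) before entering guest mode; it now uses the generic helpers from <linux/entry-kvm.h>. A minimal sketch of the resulting run-loop shape, assuming the v5.9-era API (the surrounding helpers such as kvm_vcpu_running() and vcpu_enter_guest() are shown for illustration, and the work-handling call was later renamed xfer_to_guest_mode_handle_work()):

	/* Sketch: re-check for pending work before every guest entry. */
	for (;;) {
		if (kvm_vcpu_running(vcpu))
			r = vcpu_enter_guest(vcpu);
		else
			r = vcpu_block(kvm, vcpu);
		if (r <= 0)
			break;

		/* Generic check replacing the open-coded TIF tests. */
		if (__xfer_to_guest_mode_work_pending()) {
			r = xfer_to_guest_mode(vcpu);
			if (r)
				return r;
		}
	}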
arch/x86/kernel/alternative.c

@@ -1047,7 +1047,7 @@ static __always_inline int patch_cmp(const void *key, const void *elt)
 	return 0;
 }
 
-int noinstr poke_int3_handler(struct pt_regs *regs)
+noinstr int poke_int3_handler(struct pt_regs *regs)
 {
 	struct bp_patching_desc *desc;
 	struct text_poke_loc *tp;
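Background for the qualifier fix: noinstr marks functions that run before the kernel's instrumentation machinery is safe, placing them in the .noinstr.text section with tracing disabled; the convention is for such attribute macros to lead the declaration rather than sit between the return type and the function name. Roughly, and only as a simplified approximation of the real macro in include/linux/compiler_types.h (which also disables KASAN/KCSAN instrumentation):

	/* Approximate shape only; the real definition carries
	 * additional no-sanitize attributes. */
	#define noinstr \
		noinline notrace __attribute__((__section__(".noinstr.text")))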
arch/x86/kernel/cpu/mce/core.c

@@ -1215,7 +1215,7 @@ static void kill_me_maybe(struct callback_head *cb)
  * backing the user stack, tracing that reads the user stack will cause
  * potentially infinite recursion.
  */
-void noinstr do_machine_check(struct pt_regs *regs)
+noinstr void do_machine_check(struct pt_regs *regs)
 {
 	DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
 	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
@@ -1930,11 +1930,11 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
 
 static __always_inline void exc_machine_check_user(struct pt_regs *regs)
 {
-	idtentry_enter_user(regs);
+	irqentry_enter_from_user_mode(regs);
 	instrumentation_begin();
 	machine_check_vector(regs);
 	instrumentation_end();
-	idtentry_exit_user(regs);
+	irqentry_exit_to_user_mode(regs);
 }
 
 #ifdef CONFIG_X86_64
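The hunk above is the template for every user-mode-only handler in this series: the x86-private idtentry_enter_user()/idtentry_exit_user() pair becomes the generic irqentry_enter_from_user_mode()/irqentry_exit_to_user_mode() pair from <linux/entry-common.h>, with the instrumentable body still bracketed by instrumentation_begin()/instrumentation_end(). As a sketch, where exc_foo_user() and handle_foo() are placeholders rather than real kernel symbols:

	static __always_inline void exc_foo_user(struct pt_regs *regs)
	{
		irqentry_enter_from_user_mode(regs);	/* generic entry */
		instrumentation_begin();
		handle_foo(regs);			/* instrumentable body */
		instrumentation_end();
		irqentry_exit_to_user_mode(regs);	/* generic exit */
	}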
arch/x86/kernel/kvm.c

@@ -233,7 +233,7 @@ EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);
 noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
 {
 	u32 reason = kvm_read_and_reset_apf_flags();
-	idtentry_state_t state;
+	irqentry_state_t state;
 
 	switch (reason) {
 	case KVM_PV_REASON_PAGE_NOT_PRESENT:
@@ -243,7 +243,7 @@ noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
 		return false;
 	}
 
-	state = idtentry_enter(regs);
+	state = irqentry_enter(regs);
 	instrumentation_begin();
 
 	/*
@@ -264,7 +264,7 @@ noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
 	}
 
 	instrumentation_end();
-	idtentry_exit(regs, state);
+	irqentry_exit(regs, state);
 	return true;
 }
 
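Handlers that can be reached from either user or kernel context use the stateful variant instead: irqentry_enter() returns an irqentry_state_t recording what it adjusted (RCU, lockdep, context tracking), and irqentry_exit() consumes that state so only the adjustments actually made are undone. The shape, as used in __kvm_handle_async_pf() above (do_foo() stands in for the real handler body):

	irqentry_state_t state = irqentry_enter(regs);

	instrumentation_begin();
	do_foo(regs);
	instrumentation_end();

	irqentry_exit(regs, state);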
arch/x86/kernel/signal.c

@@ -25,6 +25,7 @@
 #include <linux/user-return-notifier.h>
 #include <linux/uprobes.h>
 #include <linux/context_tracking.h>
+#include <linux/entry-common.h>
 #include <linux/syscalls.h>
 
 #include <asm/processor.h>
@@ -803,7 +804,7 @@ static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
  * mistake.
  */
-void do_signal(struct pt_regs *regs)
+void arch_do_signal(struct pt_regs *regs)
 {
 	struct ksignal ksig;
 
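The do_signal() → arch_do_signal() rename is what plugs x86 signal delivery into the generic exit-to-usermode work loop in kernel/entry/common.c, which invokes the arch hook when TIF_SIGPENDING is set. A condensed sketch of that loop, assuming the v5.9-era generic entry code (the real loop handles more TIF flags, e.g. uprobes and live patching):

	static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
						    unsigned long ti_work)
	{
		while (ti_work & EXIT_TO_USER_MODE_WORK) {
			local_irq_enable_exit_to_user(ti_work);

			if (ti_work & _TIF_NEED_RESCHED)
				schedule();

			if (ti_work & _TIF_SIGPENDING)
				arch_do_signal(regs);	/* the renamed hook */

			local_irq_disable_exit_to_user();
			/* re-check work with interrupts disabled */
			ti_work = READ_ONCE(current_thread_info()->flags);
		}
		return ti_work;
	}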
arch/x86/kernel/traps.c

@@ -245,7 +245,7 @@ static noinstr bool handle_bug(struct pt_regs *regs)
 
 DEFINE_IDTENTRY_RAW(exc_invalid_op)
 {
-	idtentry_state_t state;
+	irqentry_state_t state;
 
 	/*
 	 * We use UD2 as a short encoding for 'CALL __WARN', as such
@@ -255,11 +255,11 @@ DEFINE_IDTENTRY_RAW(exc_invalid_op)
 	if (!user_mode(regs) && handle_bug(regs))
 		return;
 
-	state = idtentry_enter(regs);
+	state = irqentry_enter(regs);
 	instrumentation_begin();
 	handle_invalid_op(regs);
 	instrumentation_end();
-	idtentry_exit(regs, state);
+	irqentry_exit(regs, state);
 }
 
 DEFINE_IDTENTRY(exc_coproc_segment_overrun)
@@ -638,18 +638,18 @@ DEFINE_IDTENTRY_RAW(exc_int3)
 		return;
 
 	/*
-	 * idtentry_enter_user() uses static_branch_{,un}likely() and therefore
-	 * can trigger INT3, hence poke_int3_handler() must be done
-	 * before. If the entry came from kernel mode, then use nmi_enter()
-	 * because the INT3 could have been hit in any context including
-	 * NMI.
+	 * irqentry_enter_from_user_mode() uses static_branch_{,un}likely()
+	 * and therefore can trigger INT3, hence poke_int3_handler() must
+	 * be done before. If the entry came from kernel mode, then use
+	 * nmi_enter() because the INT3 could have been hit in any context
+	 * including NMI.
 	 */
 	if (user_mode(regs)) {
-		idtentry_enter_user(regs);
+		irqentry_enter_from_user_mode(regs);
 		instrumentation_begin();
 		do_int3_user(regs);
 		instrumentation_end();
-		idtentry_exit_user(regs);
+		irqentry_exit_to_user_mode(regs);
 	} else {
 		bool irq_state = idtentry_enter_nmi(regs);
 		instrumentation_begin();
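exc_int3 above shows the third pattern: a handler that must split on origin. Entries from user space take the generic user-mode path, but a kernel-mode #BP can fire in any context, including inside an NMI handler, so that side keeps the nesting-safe NMI-style entry (paired with idtentry_exit_nmi() in this tree, later renamed irqentry_nmi_enter/exit). Schematically, with the kernel-mode body elided:

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		instrumentation_begin();
		do_int3_user(regs);
		instrumentation_end();
		irqentry_exit_to_user_mode(regs);
	} else {
		/* NMI-style entry: #BP may have hit in any kernel
		 * context, so plain irqentry_enter() is not safe here. */
		bool irq_state = idtentry_enter_nmi(regs);

		instrumentation_begin();
		/* ... kernel-mode #BP handling ... */
		instrumentation_end();
		idtentry_exit_nmi(regs, irq_state);
	}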
@@ -895,13 +895,13 @@ static __always_inline void exc_debug_user(struct pt_regs *regs,
 	 */
 	WARN_ON_ONCE(!user_mode(regs));
 
-	idtentry_enter_user(regs);
+	irqentry_enter_from_user_mode(regs);
 	instrumentation_begin();
 
 	handle_debug(regs, dr6, true);
 
 	instrumentation_end();
-	idtentry_exit_user(regs);
+	irqentry_exit_to_user_mode(regs);
 }
 
 #ifdef CONFIG_X86_64