
commit ff083a2d972f56bebfd82409ca62e5dfce950961 upstream.
Protect perf_guest_cbs with RCU to fix multiple possible errors. Luckily,
all paths that read perf_guest_cbs already require RCU protection, e.g. to
protect the callback chains, so only the direct perf_guest_cbs touchpoints
need to be modified.
Bug #1 is a simple lack of WRITE_ONCE/READ_ONCE behavior to ensure
perf_guest_cbs isn't reloaded between a !NULL check and a dereference.
Fixed via the READ_ONCE() in rcu_dereference().
Bug #2 is that on weakly-ordered architectures, updates to the callbacks
themselves are not guaranteed to be visible before the pointer is made
visible to readers. Fixed by the smp_store_release() in
rcu_assign_pointer() when the new pointer is non-NULL.
Bug #3 is that, because the callbacks are global, it's possible for
readers to run in parallel with an unregister, and thus a module
implementing the callbacks can be unloaded while readers are in flight,
resulting in a use-after-free. Fixed by a synchronize_rcu() call when
unregistering callbacks.
Bug #1 escaped notice because it's extremely unlikely a compiler will
reload perf_guest_cbs in this sequence. perf_guest_cbs does get reloaded
for future derefs, e.g. for ->is_user_mode(), but the ->is_in_guest()
guard all but guarantees the consumer will win the race, e.g. to nullify
perf_guest_cbs, KVM has to completely exit the guest and tear down
all VMs before KVM starts its module unload / unregister sequence. This
also makes it all but impossible to encounter bug #3.
Bug #2 has not been a problem because all architectures that register
callbacks are strongly ordered and/or have a static set of callbacks.
But with help, unloading kvm_intel can trigger bug #1 e.g. wrapping
perf_guest_cbs with READ_ONCE in perf_misc_flags() while spamming
kvm_intel module load/unload leads to:
BUG: kernel NULL pointer dereference, address: 0000000000000000
#PF: supervisor read access in kernel mode
#PF: error_code(0x0000) - not-present page
PGD 0 P4D 0
Oops: 0000 [#1] PREEMPT SMP
CPU: 6 PID: 1825 Comm: stress Not tainted 5.14.0-rc2+ #459
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
RIP: 0010:perf_misc_flags+0x1c/0x70
Call Trace:
perf_prepare_sample+0x53/0x6b0
perf_event_output_forward+0x67/0x160
__perf_event_overflow+0x52/0xf0
handle_pmi_common+0x207/0x300
intel_pmu_handle_irq+0xcf/0x410
perf_event_nmi_handler+0x28/0x50
nmi_handle+0xc7/0x260
default_do_nmi+0x6b/0x170
exc_nmi+0x103/0x130
asm_exc_nmi+0x76/0xbf
Fixes: 39447b386c
("perf: Enhance perf to allow for guest statistic collection from host")
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20211111020738.2512932-2-seanjc@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
143 lines
3.3 KiB
C
143 lines
3.3 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
/*
|
|
* ARM callchain support
|
|
*
|
|
* Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
|
|
* Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
|
|
*
|
|
* This code is based on the ARM OProfile backtrace code.
|
|
*/
|
|
#include <linux/perf_event.h>
|
|
#include <linux/uaccess.h>
|
|
|
|
#include <asm/stacktrace.h>
|
|
|
|
/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
	struct frame_tail __user *fp;	/* caller's frame tail (user address) */
	unsigned long sp;		/* saved stack pointer */
	unsigned long lr;		/* saved link register (return address) */
} __attribute__((packed));		/* packed: must match the exact on-stack layout */
|
|
|
|
/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 *
 * Returns NULL to terminate the walk: on an inaccessible or faulting frame
 * pointer, or when the chain stops progressing up the stack.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry_ctx *entry)
{
	struct frame_tail buftail;
	unsigned long err;

	/* Reject frame pointers that don't point at readable user memory. */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	/*
	 * Faults must not be serviced here (perf callchains are gathered
	 * from interrupt/NMI context — see the _inatomic copy), so disable
	 * pagefaults around the user access and bail out on any error.
	 */
	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	/* The saved lr is this frame's return address — record it. */
	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= buftail.fp)
		return NULL;

	return buftail.fp - 1;
}
|
|
|
|
void
|
|
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
|
|
{
|
|
struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
|
|
struct frame_tail __user *tail;
|
|
|
|
if (guest_cbs && guest_cbs->is_in_guest()) {
|
|
/* We don't support guest os callchain now */
|
|
return;
|
|
}
|
|
|
|
perf_callchain_store(entry, regs->ARM_pc);
|
|
|
|
if (!current->mm)
|
|
return;
|
|
|
|
tail = (struct frame_tail __user *)regs->ARM_fp - 1;
|
|
|
|
while ((entry->nr < entry->max_stack) &&
|
|
tail && !((unsigned long)tail & 0x3))
|
|
tail = user_backtrace(tail, entry);
|
|
}
|
|
|
|
/*
|
|
* Gets called by walk_stackframe() for every stackframe. This will be called
|
|
* whist unwinding the stackframe and is like a subroutine return so we use
|
|
* the PC.
|
|
*/
|
|
static int
|
|
callchain_trace(struct stackframe *fr,
|
|
void *data)
|
|
{
|
|
struct perf_callchain_entry_ctx *entry = data;
|
|
perf_callchain_store(entry, fr->pc);
|
|
return 0;
|
|
}
|
|
|
|
void
|
|
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
|
|
{
|
|
struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
|
|
struct stackframe fr;
|
|
|
|
if (guest_cbs && guest_cbs->is_in_guest()) {
|
|
/* We don't support guest os callchain now */
|
|
return;
|
|
}
|
|
|
|
arm_get_current_stackframe(regs, &fr);
|
|
walk_stackframe(&fr, callchain_trace, entry);
|
|
}
|
|
|
|
unsigned long perf_instruction_pointer(struct pt_regs *regs)
|
|
{
|
|
struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
|
|
|
|
if (guest_cbs && guest_cbs->is_in_guest())
|
|
return guest_cbs->get_guest_ip();
|
|
|
|
return instruction_pointer(regs);
|
|
}
|
|
|
|
unsigned long perf_misc_flags(struct pt_regs *regs)
|
|
{
|
|
struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
|
|
int misc = 0;
|
|
|
|
if (guest_cbs && guest_cbs->is_in_guest()) {
|
|
if (guest_cbs->is_user_mode())
|
|
misc |= PERF_RECORD_MISC_GUEST_USER;
|
|
else
|
|
misc |= PERF_RECORD_MISC_GUEST_KERNEL;
|
|
} else {
|
|
if (user_mode(regs))
|
|
misc |= PERF_RECORD_MISC_USER;
|
|
else
|
|
misc |= PERF_RECORD_MISC_KERNEL;
|
|
}
|
|
|
|
return misc;
|
|
}
|