xtensa: implement fake NMI
In case perf IRQ is the highest of the medium-level IRQs, and is alone
on its level, it may be treated as NMI:
- LOCKLEVEL is defined to be one level less than EXCM level,
- IRQ masking never lowers current IRQ level,
- new fake exception cause code, EXCCAUSE_MAPPED_NMI is assigned to that
  IRQ; new second level exception handler, do_nmi, assigned to it
  handles it as NMI,
- atomic operations in configurations without s32c1i still need to mask
  all interrupts.

Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
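For orientation, the level arithmetic behind the bullet list above can be sketched as follows. This is an illustrative sketch only, not the literal header change from this commit: LOCKLEVEL, TOPLEVEL, XTENSA_FAKE_NMI and XCHAL_EXCM_LEVEL are names the patch relies on, while PROFILING_IRQ_ALONE_AT_EXCM_LEVEL is a placeholder for the real preprocessor condition (profiling interrupt configured and alone at EXCM level, s32c1i availability, etc.) that lives in the architecture headers (arch/xtensa/include/asm/processor.h).

/* Illustrative sketch of the level relationships described above. */

#define TOPLEVEL	XCHAL_EXCM_LEVEL	/* level masked by PS.EXCM */

#if PROFILING_IRQ_ALONE_AT_EXCM_LEVEL		/* placeholder for the real condition */
#define LOCKLEVEL	(XCHAL_EXCM_LEVEL - 1)	/* IRQ masking stops one level short */
#else
#define LOCKLEVEL	XCHAL_EXCM_LEVEL	/* usual case: mask everything */
#endif

/* The perf IRQ acts as an NMI exactly when kernel masking cannot reach it. */
#define XTENSA_FAKE_NMI	(LOCKLEVEL < TOPLEVEL)

With LOCKLEVEL one level below the perf IRQ, the irq_save sequence added to entry.S below only raises PS.INTLEVEL to LOCKLEVEL (and never lowers it), so the profiling interrupt stays deliverable even inside irqsave regions and is dispatched via EXCCAUSE_MAPPED_NMI to do_nmi.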
@@ -1,6 +1,4 @@
/*
* arch/xtensa/kernel/entry.S
*
* Low-level exception handling
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -8,6 +6,7 @@
* for more details.
*
* Copyright (C) 2004 - 2008 by Tensilica Inc.
* Copyright (C) 2015 Cadence Design Systems Inc.
*
* Chris Zankel <chris@zankel.net>
*
@@ -75,6 +74,27 @@
#endif
.endm


.macro irq_save flags tmp
#if XTENSA_FAKE_NMI
#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
rsr \flags, ps
extui \tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
bgei \tmp, LOCKLEVEL, 99f
rsil \tmp, LOCKLEVEL
99:
#else
movi \tmp, LOCKLEVEL
rsr \flags, ps
or \flags, \flags, \tmp
xsr \flags, ps
rsync
#endif
#else
rsil \flags, LOCKLEVEL
#endif
.endm

/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */

/*
@@ -352,11 +372,11 @@ common_exception:

/* It is now save to restore the EXC_TABLE_FIXUP variable. */

rsr a0, exccause
rsr a2, exccause
movi a3, 0
rsr a2, excsave1
s32i a0, a1, PT_EXCCAUSE
s32i a3, a2, EXC_TABLE_FIXUP
rsr a0, excsave1
s32i a2, a1, PT_EXCCAUSE
s32i a3, a0, EXC_TABLE_FIXUP

/* All unrecoverable states are saved on stack, now, and a1 is valid.
* Now we can allow exceptions again. In case we've got an interrupt
@@ -367,19 +387,46 @@ common_exception:
*/

rsr a3, ps
addi a0, a0, -EXCCAUSE_LEVEL1_INTERRUPT
movi a2, LOCKLEVEL
s32i a3, a1, PT_PS # save ps

#if XTENSA_FAKE_NMI
/* Correct PS needs to be saved in the PT_PS:
* - in case of exception or level-1 interrupt it's in the PS,
* and is already saved.
* - in case of medium level interrupt it's in the excsave2.
*/
movi a0, EXCCAUSE_MAPPED_NMI
extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
beq a2, a0, .Lmedium_level_irq
bnei a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception
beqz a3, .Llevel1_irq # level-1 IRQ sets ps.intlevel to 0

.Lmedium_level_irq:
rsr a0, excsave2
s32i a0, a1, PT_PS # save medium-level interrupt ps
bgei a3, LOCKLEVEL, .Lexception

.Llevel1_irq:
movi a3, LOCKLEVEL

.Lexception:
movi a0, 1 << PS_WOE_BIT
or a3, a3, a0
#else
addi a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT
movi a0, LOCKLEVEL
extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
# a3 = PS.INTLEVEL
moveqz a3, a2, a0 # a3 = LOCKLEVEL iff interrupt
moveqz a3, a0, a2 # a3 = LOCKLEVEL iff interrupt
movi a2, 1 << PS_WOE_BIT
or a3, a3, a2
rsr a2, exccause
#endif

/* restore return address (or 0 if return to userspace) */
rsr a0, depc
xsr a3, ps

s32i a3, a1, PT_PS # save ps
wsr a3, ps
rsync # PS.WOE => rsync => overflow

/* Save lbeg, lend */

@@ -417,8 +464,13 @@ common_exception:
.global common_exception_return
common_exception_return:

#if XTENSA_FAKE_NMI
l32i a2, a1, PT_EXCCAUSE
movi a3, EXCCAUSE_MAPPED_NMI
beq a2, a3, .LNMIexit
#endif
1:
rsil a2, LOCKLEVEL
irq_save a2, a3
#ifdef CONFIG_TRACE_IRQFLAGS
movi a4, trace_hardirqs_off
callx4 a4
@@ -481,6 +533,12 @@ common_exception_return:
j 1b
#endif

#if XTENSA_FAKE_NMI
.LNMIexit:
l32i a3, a1, PT_PS
_bbci.l a3, PS_UM_BIT, 4f
#endif

5:
#ifdef CONFIG_DEBUG_TLB_SANITY
l32i a4, a1, PT_DEPC
@@ -1564,6 +1622,13 @@ ENTRY(fast_second_level_miss)
rfde

9: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
bnez a0, 8b

/* Even more unlikely case active_mm == 0.
* We can get here with NMI in the middle of context_switch that
* touches vmalloc area.
*/
movi a0, init_mm
j 8b

#if (DCACHE_WAY_SIZE > PAGE_SIZE)
@@ -1867,7 +1932,7 @@ ENTRY(_switch_to)

/* Disable ints while we manipulate the stack pointer. */

rsil a14, LOCKLEVEL
irq_save a14, a3
rsync

/* Switch CPENABLE */

@@ -29,6 +29,7 @@
#include <asm/platform.h>

atomic_t irq_err_count;
DECLARE_PER_CPU(unsigned long, nmi_count);

asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
{
@@ -57,11 +58,18 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)

int arch_show_interrupts(struct seq_file *p, int prec)
{
unsigned cpu __maybe_unused;
#ifdef CONFIG_SMP
show_ipi_list(p, prec);
#endif
seq_printf(p, "%*s: ", prec, "ERR");
seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
#if XTENSA_FAKE_NMI
seq_printf(p, "%*s:", prec, "NMI");
for_each_online_cpu(cpu)
seq_printf(p, " %10lu", per_cpu(nmi_count, cpu));
seq_puts(p, " Non-maskable interrupts\n");
#endif
return 0;
}

@@ -359,7 +359,7 @@ void perf_event_print_debug(void)
local_irq_restore(flags);
}

static irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id)
irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id)
{
irqreturn_t rc = IRQ_NONE;
struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
@@ -436,10 +436,14 @@ static int __init xtensa_pmu_init(void)
int irq = irq_create_mapping(NULL, XCHAL_PROFILING_INTERRUPT);

perf_cpu_notifier(xtensa_pmu_notifier);
#if XTENSA_FAKE_NMI
enable_irq(irq);
#else
ret = request_irq(irq, xtensa_pmu_irq_handler, IRQF_PERCPU,
"pmu", NULL);
if (ret < 0)
return ret;
#endif

ret = perf_pmu_register(&xtensa_pmu, "cpu", PERF_TYPE_RAW);
if (ret)

@@ -62,6 +62,7 @@ extern void fast_coprocessor(void);

extern void do_illegal_instruction (struct pt_regs*);
extern void do_interrupt (struct pt_regs*);
extern void do_nmi(struct pt_regs *);
extern void do_unaligned_user (struct pt_regs*);
extern void do_multihit (struct pt_regs*, unsigned long);
extern void do_page_fault (struct pt_regs*, unsigned long);
@@ -146,6 +147,9 @@ COPROCESSOR(6),
#if XTENSA_HAVE_COPROCESSOR(7)
COPROCESSOR(7),
#endif
#if XTENSA_FAKE_NMI
{ EXCCAUSE_MAPPED_NMI, 0, do_nmi },
#endif
{ EXCCAUSE_MAPPED_DEBUG, 0, do_debug },
{ -1, -1, 0 }

@@ -199,6 +203,28 @@ void do_multihit(struct pt_regs *regs, unsigned long exccause)

extern void do_IRQ(int, struct pt_regs *);

#if XTENSA_FAKE_NMI

irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id);

DEFINE_PER_CPU(unsigned long, nmi_count);

void do_nmi(struct pt_regs *regs)
{
struct pt_regs *old_regs;

if ((regs->ps & PS_INTLEVEL_MASK) < LOCKLEVEL)
trace_hardirqs_off();

old_regs = set_irq_regs(regs);
nmi_enter();
++*this_cpu_ptr(&nmi_count);
xtensa_pmu_irq_handler(0, NULL);
nmi_exit();
set_irq_regs(old_regs);
}
#endif

void do_interrupt(struct pt_regs *regs)
{
static const unsigned int_level_mask[] = {

@@ -627,7 +627,11 @@ ENTRY(_Level\level\()InterruptVector)
wsr a0, excsave2
rsr a0, epc\level
wsr a0, epc1
.if \level <= LOCKLEVEL
movi a0, EXCCAUSE_LEVEL1_INTERRUPT
.else
movi a0, EXCCAUSE_MAPPED_NMI
.endif
wsr a0, exccause
rsr a0, eps\level
# branch to user or kernel vector
@@ -682,11 +686,13 @@ ENDPROC(_WindowOverflow4)
.align 4
_SimulateUserKernelVectorException:
addi a0, a0, (1 << PS_EXCM_BIT)
#if !XTENSA_FAKE_NMI
wsr a0, ps
#endif
bbsi.l a0, PS_UM_BIT, 1f # branch if user mode
rsr a0, excsave2 # restore a0
xsr a0, excsave2 # restore a0
j _KernelExceptionVector # simulate kernel vector exception
1: rsr a0, excsave2 # restore a0
1: xsr a0, excsave2 # restore a0
j _UserExceptionVector # simulate user vector exception
#endif