powerpc/64s/idle: Move soft interrupt mask logic into C code
This simplifies the asm and fixes irq-off tracing over sleep instructions. Also move the powersave_nap check for POWER8 into C code, and move the PSSCR register value calculation for POWER9 into C.

Reviewed-by: Gautham R. Shenoy <ego@linux.vnet.ibm.com>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>

commit 2201f994a5
parent 42bed04255
committed by Michael Ellerman
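
Before the diff hunks, a rough sketch of the direction of the change on the POWER8 side: the powersave_nap check and the soft-mask/irq-trace handling move into C, and the asm entry shrinks to power7_idle_insn(). Only power7_idle_insn, prep_irq_for_idle_irqsoff, powersave_nap and PNV_THREAD_NAP appear in the patch excerpt below; the wrapper shape, prototypes and header locations here are assumptions, not the literal patch.

/* Sketch only -- prototypes, header locations and the wrapper shape are assumed. */
#include <asm/cpuidle.h>    /* PNV_THREAD_NAP (assumed header) */
#include <asm/hw_irq.h>     /* prep/fini_irq_for_idle_irqsoff() (assumed header) */

extern unsigned long powersave_nap;                        /* existing global; exact type assumed */
extern unsigned long power7_idle_insn(unsigned long type); /* asm entry, r3 = idle state */

/* POWER8: the nap-enable check is done in C rather than in the power7_idle asm */
void power7_idle(void)
{
        if (!powersave_nap)
                return;

        if (!prep_irq_for_idle_irqsoff())       /* an interrupt is already pending: don't sleep */
                return;

        power7_idle_insn(PNV_THREAD_NAP);       /* run the nap instruction sequence */

        fini_irq_for_idle_irqsoff();            /* assumed counterpart, sketched at the end of this page */
}
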
arch/powerpc/kernel/idle_book3s.S

@@ -109,13 +109,9 @@ core_idle_lock_held:
 /*
  * Pass requested state in r3:
  *      r3 - PNV_THREAD_NAP/SLEEP/WINKLE in POWER8
- *         - Requested STOP state in POWER9
+ *         - Requested PSSCR value in POWER9
  *
- * To check IRQ_HAPPENED in r4
- *      0 - don't check
- *      1 - check
- *
- * Address to 'rfid' to in r5
+ * Address of idle handler to 'rfid' to in r4
  */
 pnv_powersave_common:
        /* Use r3 to pass state nap/sleep/winkle */
@@ -131,30 +127,7 @@ pnv_powersave_common:
        std     r0,_LINK(r1)
        std     r0,_NIP(r1)
 
-       /* Hard disable interrupts */
-       mfmsr   r9
-       rldicl  r9,r9,48,1
-       rotldi  r9,r9,16
-       mtmsrd  r9,1                    /* hard-disable interrupts */
-
-       /* Check if something happened while soft-disabled */
-       lbz     r0,PACAIRQHAPPENED(r13)
-       andi.   r0,r0,~PACA_IRQ_HARD_DIS@l
-       beq     1f
-       cmpwi   cr0,r4,0
-       beq     1f
-       addi    r1,r1,INT_FRAME_SIZE
-       ld      r0,16(r1)
-       li      r3,0                    /* Return 0 (no nap) */
-       mtlr    r0
-       blr
-
-1:     /* We mark irqs hard disabled as this is the state we'll
-        * be in when returning and we need to tell arch_local_irq_restore()
-        * about it
-        */
-       li      r0,PACA_IRQ_HARD_DIS
-       stb     r0,PACAIRQHAPPENED(r13)
+       mfmsr   r9
 
        /* We haven't lost state ... yet */
        li      r0,0
@@ -163,8 +136,8 @@ pnv_powersave_common:
        /* Continue saving state */
        SAVE_GPR(2, r1)
        SAVE_NVGPRS(r1)
-       mfcr    r4
-       std     r4,_CCR(r1)
+       mfcr    r5
+       std     r5,_CCR(r1)
        std     r9,_MSR(r1)
        std     r1,PACAR1(r13)
 
@@ -178,7 +151,7 @@ pnv_powersave_common:
        li      r6, MSR_RI
        andc    r6, r9, r6
        mtmsrd  r6, 1           /* clear RI before setting SRR0/1 */
-       mtspr   SPRN_SRR0, r5
+       mtspr   SPRN_SRR0, r4
        mtspr   SPRN_SRR1, r7
        rfid
 
@@ -322,35 +295,14 @@ lwarx_loop_stop:
 
        IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)
 
-_GLOBAL(power7_idle)
-       /* Now check if user or arch enabled NAP mode */
-       LOAD_REG_ADDRBASE(r3,powersave_nap)
-       lwz     r4,ADDROFF(powersave_nap)(r3)
-       cmpwi   0,r4,0
-       beqlr
-       li      r3, 1
-       /* fall through */
-
-_GLOBAL(power7_nap)
-       mr      r4,r3
-       li      r3,PNV_THREAD_NAP
-       LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
+/*
+ * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
+ * r3 contains desired idle state (PNV_THREAD_NAP/SLEEP/WINKLE).
+ */
+_GLOBAL(power7_idle_insn)
+       LOAD_REG_ADDR(r4, pnv_enter_arch207_idle_mode)
        b       pnv_powersave_common
        /* No return */
-
-_GLOBAL(power7_sleep)
-       li      r3,PNV_THREAD_SLEEP
-       li      r4,1
-       LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
-       b       pnv_powersave_common
-       /* No return */
-
-_GLOBAL(power7_winkle)
-       li      r3,PNV_THREAD_WINKLE
-       li      r4,1
-       LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
-       b       pnv_powersave_common
-       /* No return */
 
 #define CHECK_HMI_INTERRUPT                                    \
        mfspr   r0,SPRN_SRR1;                                   \
@@ -372,17 +324,13 @@ ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);       \
 20:    nop;
 
 /*
- * r3 - The PSSCR value corresponding to the stop state.
- * r4 - The PSSCR mask corrresonding to the stop state.
+ * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
+ * r3 contains desired PSSCR register value.
  */
 _GLOBAL(power9_idle_stop)
-       mfspr   r5,SPRN_PSSCR
-       andc    r5,r5,r4
-       or      r3,r3,r5
        std     r3, PACA_REQ_PSSCR(r13)
        mtspr   SPRN_PSSCR,r3
-       LOAD_REG_ADDR(r5,power_enter_stop)
-       li      r4,1
+       LOAD_REG_ADDR(r4,power_enter_stop)
        b       pnv_powersave_common
        /* No return */
 
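The last hunk above removes the PSSCR value/mask merge from power9_idle_stop, so the caller now hands the asm a fully formed PSSCR value in r3. A minimal sketch of the corresponding C-side calculation follows; the wrapper name power9_idle_type and its prototype are assumptions, while power9_idle_stop, SPRN_PSSCR and prep_irq_for_idle_irqsoff come from the diff.

#include <asm/reg.h>        /* mfspr(), SPRN_PSSCR */
#include <asm/hw_irq.h>     /* prep/fini_irq_for_idle_irqsoff() (assumed header) */

extern unsigned long power9_idle_stop(unsigned long psscr); /* asm entry, r3 = PSSCR value */

/* POWER9: the requested PSSCR value is computed in C, not in the stop asm */
void power9_idle_type(unsigned long psscr_val, unsigned long psscr_mask)
{
        unsigned long psscr;

        if (!prep_irq_for_idle_irqsoff())
                return;

        psscr = mfspr(SPRN_PSSCR);
        psscr = (psscr & ~psscr_mask) | psscr_val;      /* replaces the mfspr/andc/or removed above */

        power9_idle_stop(psscr);

        fini_irq_for_idle_irqsoff();    /* assumed counterpart, sketched at the end of this page */
}
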
arch/powerpc/kernel/irq.c

@@ -322,7 +322,8 @@ bool prep_irq_for_idle(void)
         * First we need to hard disable to ensure no interrupt
         * occurs before we effectively enter the low power state
         */
-       hard_irq_disable();
+       __hard_irq_disable();
+       local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
 
        /*
         * If anything happened while we were soft-disabled,
@@ -347,6 +348,36 @@ bool prep_irq_for_idle(void)
        return true;
 }
 
+/*
+ * This is for idle sequences that return with IRQs off, but the
+ * idle state itself wakes on interrupt. Tell the irq tracer that
+ * IRQs are enabled for the duration of idle so it does not get long
+ * off times. Must be paired with fini_irq_for_idle_irqsoff.
+ */
+bool prep_irq_for_idle_irqsoff(void)
+{
+       WARN_ON(!irqs_disabled());
+
+       /*
+        * First we need to hard disable to ensure no interrupt
+        * occurs before we effectively enter the low power state
+        */
+       __hard_irq_disable();
+       local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+       /*
+        * If anything happened while we were soft-disabled,
+        * we return now and do not enter the low power state.
+        */
+       if (lazy_irq_pending())
+               return false;
+
+       /* Tell lockdep we are about to re-enable */
+       trace_hardirqs_on();
+
+       return true;
+}
+
 /*
  * Force a replay of the external interrupt handler on this CPU.
  */
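
The comment on prep_irq_for_idle_irqsoff() above says it must be paired with fini_irq_for_idle_irqsoff(), which is not part of this excerpt. Presumably it just tells the irq tracer that interrupts are conceptually off again once the idle instruction has returned; a minimal sketch, assuming it lives as a static inline next to the other soft-mask helpers:

#include <linux/irqflags.h> /* trace_hardirqs_off() */

/* Assumed counterpart of prep_irq_for_idle_irqsoff(); not shown in this excerpt. */
static inline void fini_irq_for_idle_irqsoff(void)
{
        trace_hardirqs_off();   /* undo the trace_hardirqs_on() done before entering idle */
}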