powerpc/64/syscall: Implement syscall entry/exit logic in C
System call entry and particularly exit code is beyond the limit of what is reasonable to implement in asm.

This conversion moves all conditional branches out of the asm code, except for the case that all GPRs should be restored at exit.

The null syscall test is about 5% faster after this patch, because the exit work is handled under local_irq_disable, and the hard mask and pending interrupt replay is handled after that, which avoids games with MSR.

mpe: Includes subsequent fixes from Nick:

This fixes 4 issues caught by TM selftests. First was a tm-syscall bug that hit due to tabort_syscall being called after interrupts were reconciled (in a subsequent patch), which led to interrupts being enabled before tabort_syscall was called. Rather than un-reconciling interrupts for the return, I just go back to putting the test early in asm; the C-ification of that wasn't a big win anyway.

Second is that the syscall return _TIF_USER_WORK_MASK check would go into an infinite loop if _TIF_RESTORE_TM became set. The asm code uses _TIF_USER_WORK_MASK to branch to the slowpath, which includes restore_tm_state.

Third is that system call return was not calling restore_tm_state; I missed this completely (although it is in the return-from-interrupt C conversion) because when the asm syscall code encountered problems it would branch to the interrupt return code.

Fourth is MSR_VEC missing from restore_math, which was caught by the tm-unavailable selftest taking an unexpected facility unavailable interrupt when testing the VSX unavailable exception with MSR.FP=1 MSR.VEC=1. The fourth case also has a fixup in a subsequent patch.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michal Suchanek <msuchanek@suse.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200225173541.1549955-26-npiggin@gmail.com
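For reference, below is a minimal sketch of the C exit-work pattern the message describes: exit work runs with local interrupts disabled, the _TIF_USER_WORK_MASK loop excludes _TIF_RESTORE_TM so it cannot spin, and restore_tm_state()/restore_math() run after the loop. The function name and the reduced flag handling are illustrative assumptions, not the actual code added by this patch (syscall_exit_prepare() in the series); TM abort, the full-GPR-restore decision and the hard-mask/replay handling are omitted.

/*
 * Illustrative sketch only; assumes kernel headers providing the _TIF_*
 * flags, local_irq_*(), do_notify_resume(), restore_tm_state(),
 * restore_math() and account_cpu_user_exit().
 */
notrace unsigned long syscall_exit_sketch(unsigned long r3, struct pt_regs *regs)
{
	unsigned long ti_flags;

	local_irq_disable();	/* exit work is handled under local_irq_disable */
	ti_flags = READ_ONCE(current_thread_info()->flags);

	/*
	 * _TIF_RESTORE_TM is excluded from the loop mask: it is only cleared
	 * by restore_tm_state() below, not by do_notify_resume(), so keeping
	 * it in the mask would loop forever (issue two in the message).
	 */
	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
		local_irq_enable();
		if (ti_flags & _TIF_NEED_RESCHED)
			schedule();
		else
			do_notify_resume(regs, ti_flags);	/* signals etc. */
		local_irq_disable();
		ti_flags = READ_ONCE(current_thread_info()->flags);
	}

	if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
	    unlikely(ti_flags & _TIF_RESTORE_TM))
		restore_tm_state(regs);		/* issue three: must be called on syscall return */
	else
		restore_math(regs);		/* issue four: must also cover MSR_VEC */

	account_cpu_user_exit();
	return r3;	/* hard mask and pending interrupt replay follow, avoiding MSR games */
}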
commit 68b34588e2
parent f14f8a2032
committed by Michael Ellerman
@@ -43,9 +43,12 @@ static inline unsigned long cputime_to_usecs(const cputime_t ct)
 */
#ifdef CONFIG_PPC64
#define get_accounting(tsk)	(&get_paca()->accounting)
#define raw_get_accounting(tsk)	(&local_paca->accounting)
static inline void arch_vtime_task_switch(struct task_struct *tsk) { }

#else
#define get_accounting(tsk)	(&task_thread_info(tsk)->accounting)
#define raw_get_accounting(tsk)	get_accounting(tsk)
/*
 * Called from the context switch with interrupts disabled, to charge all
 * accumulated times to the current process, and to prepare accounting on
@@ -60,6 +63,36 @@ static inline void arch_vtime_task_switch(struct task_struct *prev)
}
#endif

/*
 * account_cpu_user_entry/exit runs "unreconciled", so can't trace,
 * can't use use get_paca()
 */
static notrace inline void account_cpu_user_entry(void)
{
	unsigned long tb = mftb();
	struct cpu_accounting_data *acct = raw_get_accounting(current);

	acct->utime += (tb - acct->starttime_user);
	acct->starttime = tb;
}

static notrace inline void account_cpu_user_exit(void)
{
	unsigned long tb = mftb();
	struct cpu_accounting_data *acct = raw_get_accounting(current);

	acct->stime += (tb - acct->starttime);
	acct->starttime_user = tb;
}

#endif /* __KERNEL__ */
#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static inline void account_cpu_user_entry(void)
{
}
static inline void account_cpu_user_exit(void)
{
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#endif /* __POWERPC_CPUTIME_H */
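The hunk's comment explains why the new helpers are notrace and use raw_get_accounting() (local_paca) instead of get_paca(): they run before the interrupt state has been reconciled on entry, and after it on exit, where tracing is not safe. A hypothetical caller, purely for illustration (the function name and flow below are assumptions, not the kernel's C syscall glue):

/*
 * Hypothetical usage sketch: the C syscall path brackets kernel time with
 * the two accounting helpers added above.
 */
notrace long syscall_path_sketch(struct pt_regs *regs)
{
	long ret;

	/* Entry: charge tb - starttime_user to utime, stamp kernel starttime. */
	account_cpu_user_entry();

	ret = 0;	/* ... reconcile irq state, then dispatch the system call ... */

	/* Exit: charge tb - starttime to stime, stamp starttime_user again. */
	account_cpu_user_exit();

	return ret;
}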