sched/cputime: Spare a seqcount lock/unlock cycle on context switch
On context switch we are locking the vtime seqcount of the scheduling-out
task twice:

 * On vtime_task_switch_common(), when we flush the pending vtime through
   vtime_account_system()

 * On arch_vtime_task_switch() to reset the vtime state.

This is pointless as these actions can be performed without the need to
unlock/lock in the middle. The reason these steps are separated is to
consolidate a very small amount of common code between
CONFIG_VIRT_CPU_ACCOUNTING_GEN and CONFIG_VIRT_CPU_ACCOUNTING_NATIVE.

Performance in this fast path is definitely a priority over artificial
code factorization so split the task switch code between GEN and NATIVE
and mutualize the parts that can run under a single seqcount locked
block.

As a side effect, vtime_account_idle() becomes included in the seqcount
protection. This happens to be a welcome preparation in order to
properly support kcpustat under vtime in the future and fetch
CPUTIME_IDLE without race.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Rik van Riel <riel@redhat.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Wanpeng Li <wanpengli@tencent.com> Cc: Yauheni Kaliuta <yauheni.kaliuta@redhat.com> Link: https://lkml.kernel.org/r/20191003161745.28464-3-frederic@kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:

committed by
Ingo Molnar

parent
f83eeb1a01
commit
8d495477d6
@@ -14,8 +14,12 @@ struct task_struct;
|
||||
* vtime_accounting_cpu_enabled() definitions/declarations
|
||||
*/
|
||||
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
|
||||
|
||||
/* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE: vtime accounting is unconditionally on. */
static inline bool vtime_accounting_cpu_enabled(void) { return true; }
|
||||
extern void vtime_task_switch(struct task_struct *prev);
|
||||
|
||||
#elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)
|
||||
|
||||
/*
|
||||
* Checks if vtime is enabled on some CPU. Cputime readers want to be careful
|
||||
* in that case and compute the tickless cputime.
|
||||
@@ -36,33 +40,29 @@ static inline bool vtime_accounting_cpu_enabled(void)
|
||||
|
||||
return false;
|
||||
}
|
||||
#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
|
||||
/* !CONFIG_VIRT_CPU_ACCOUNTING: vtime is never enabled on any CPU. */
static inline bool vtime_accounting_cpu_enabled(void) { return false; }
|
||||
#endif
|
||||
|
||||
extern void vtime_task_switch_generic(struct task_struct *prev);
|
||||
|
||||
/*
 * Hand the scheduling-out task's pending vtime over to the accounting
 * code on context switch — a no-op unless generic vtime accounting is
 * currently live on this CPU (i.e. some CPU runs in NO_HZ_FULL mode).
 */
static inline void vtime_task_switch(struct task_struct *prev)
{
	if (!vtime_accounting_cpu_enabled())
		return;

	vtime_task_switch_generic(prev);
}
|
||||
|
||||
#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
|
||||
|
||||
/* !CONFIG_VIRT_CPU_ACCOUNTING: vtime is never enabled on any CPU. */
static inline bool vtime_accounting_cpu_enabled(void) { return false; }
|
||||
/* !CONFIG_VIRT_CPU_ACCOUNTING: nothing to flush or reset on task switch. */
static inline void vtime_task_switch(struct task_struct *prev) { }
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Common vtime APIs
|
||||
*/
|
||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
||||
|
||||
#ifdef __ARCH_HAS_VTIME_TASK_SWITCH
|
||||
extern void vtime_task_switch(struct task_struct *prev);
|
||||
#else
|
||||
extern void vtime_common_task_switch(struct task_struct *prev);
|
||||
/*
 * Generic task-switch hook for architectures without their own
 * __ARCH_HAS_VTIME_TASK_SWITCH implementation: defer to the common
 * handler, skipping the call entirely when vtime accounting is off
 * on this CPU.
 */
static inline void vtime_task_switch(struct task_struct *prev)
{
	if (!vtime_accounting_cpu_enabled())
		return;

	vtime_common_task_switch(prev);
}
|
||||
#endif /* __ARCH_HAS_VTIME_TASK_SWITCH */
|
||||
|
||||
extern void vtime_account_kernel(struct task_struct *tsk);
|
||||
extern void vtime_account_idle(struct task_struct *tsk);
|
||||
|
||||
#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
|
||||
|
||||
/* !CONFIG_VIRT_CPU_ACCOUNTING: context switch needs no vtime bookkeeping. */
static inline void vtime_task_switch(struct task_struct *prev) { }
|
||||
/* !CONFIG_VIRT_CPU_ACCOUNTING: kernel-time accounting is a no-op. */
static inline void vtime_account_kernel(struct task_struct *tsk) { }
|
||||
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
|
||||
|
||||
|
Reference in New Issue
Block a user