Merge branch 'x86-timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 timer updates from Thomas Gleixner:
 "Early TSC based time stamping to allow better boot time analysis. This
  comes with a general cleanup of the TSC calibration code which grew
  warts and duct taping over the years and removes 250 lines of code.
  Initiated and mostly implemented by Pavel with help from various folks"

* 'x86-timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (37 commits)
  x86/kvmclock: Mark kvm_get_preset_lpj() as __init
  x86/tsc: Consolidate init code
  sched/clock: Disable interrupts when calling generic_sched_clock_init()
  timekeeping: Prevent false warning when persistent clock is not available
  sched/clock: Close a hole in sched_clock_init()
  x86/tsc: Make use of tsc_calibrate_cpu_early()
  x86/tsc: Split native_calibrate_cpu() into early and late parts
  sched/clock: Use static key for sched_clock_running
  sched/clock: Enable sched clock early
  sched/clock: Move sched clock initialization and merge with generic clock
  x86/tsc: Use TSC as sched clock early
  x86/tsc: Initialize cyc2ns when tsc frequency is determined
  x86/tsc: Calibrate tsc only once
  ARM/time: Remove read_boot_clock64()
  s390/time: Remove read_boot_clock64()
  timekeeping: Default boot time offset to local_clock()
  timekeeping: Replace read_boot_clock64() with read_persistent_wall_and_boot_offset()
  s390/time: Add read_persistent_wall_and_boot_offset()
  x86/xen/time: Output xen sched_clock time from 0
  x86/xen/time: Initialize pv xen time in init_hypervisor_platform()
  ...

--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -53,6 +53,7 @@
  *
  */
 #include "sched.h"
+#include <linux/sched_clock.h>
 
 /*
  * Scheduler clock - returns current time in nanosec units.
@@ -66,12 +67,7 @@ unsigned long long __weak sched_clock(void)
 }
 EXPORT_SYMBOL_GPL(sched_clock);
 
-__read_mostly int sched_clock_running;
-
-void sched_clock_init(void)
-{
-	sched_clock_running = 1;
-}
+static DEFINE_STATIC_KEY_FALSE(sched_clock_running);
 
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 /*
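
The hunk above is the visible part of the "sched/clock: Use static key for sched_clock_running" commit from the shortlog: the __read_mostly int flag becomes a static key, so the hot-path "is the clock initialized?" check is a patched jump label rather than a load of a global. Because static_branch_inc() is counting, early init and sched_clock_init_late() can each bump the key once and static_key_count() can still tell the two stages apart, as the next hunk shows. The fragment below is only a sketch of that idiom with hypothetical names (my_clock_ready, my_clock_init, my_clock_read); it uses the <linux/jump_label.h> API but is not code from this series.

	#include <linux/jump_label.h>

	/* Hypothetical flag; starts out false, like sched_clock_running above. */
	static DEFINE_STATIC_KEY_FALSE(my_clock_ready);

	/* Called once during setup to flip the key. */
	static void my_clock_init(void)
	{
		static_branch_inc(&my_clock_ready);
	}

	/* Hot-path read: the key test is patched code, not a load of a global flag. */
	static u64 my_clock_read(void)
	{
		if (!static_branch_unlikely(&my_clock_ready))
			return 0;	/* setup has not run yet */

		return 42;		/* placeholder for the real computation */
	}
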
@@ -195,17 +191,40 @@ void clear_sched_clock_stable(void)
 
 	smp_mb(); /* matches sched_clock_init_late() */
 
-	if (sched_clock_running == 2)
+	if (static_key_count(&sched_clock_running.key) == 2)
 		__clear_sched_clock_stable();
 }
 
+static void __sched_clock_gtod_offset(void)
+{
+	struct sched_clock_data *scd = this_scd();
+
+	__scd_stamp(scd);
+	__gtod_offset = (scd->tick_raw + __sched_clock_offset) - scd->tick_gtod;
+}
+
+void __init sched_clock_init(void)
+{
+	/*
+	 * Set __gtod_offset such that once we mark sched_clock_running,
+	 * sched_clock_tick() continues where sched_clock() left off.
+	 *
+	 * Even if TSC is buggered, we're still UP at this point so it
+	 * can't really be out of sync.
+	 */
+	local_irq_disable();
+	__sched_clock_gtod_offset();
+	local_irq_enable();
+
+	static_branch_inc(&sched_clock_running);
+}
 /*
  * We run this as late_initcall() such that it runs after all built-in drivers,
  * notably: acpi_processor and intel_idle, which can mark the TSC as unstable.
  */
 static int __init sched_clock_init_late(void)
 {
-	sched_clock_running = 2;
+	static_branch_inc(&sched_clock_running);
 	/*
 	 * Ensure that it is impossible to not do a static_key update.
 	 *
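
A note on the arithmetic introduced above, worked out only from this hunk: __sched_clock_gtod_offset() stamps the current CPU's sched_clock_data and then sets

	__gtod_offset = (scd->tick_raw + __sched_clock_offset) - scd->tick_gtod

which, at the moment of the stamp, is the same as requiring

	scd->tick_gtod + __gtod_offset == scd->tick_raw + __sched_clock_offset

so the GTOD-based value handed out once sched_clock_running is marked is anchored to the raw sched_clock() reading taken at that instant. That is what the comment means by sched_clock_tick() continuing where sched_clock() left off.
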
@@ -350,8 +369,8 @@ u64 sched_clock_cpu(int cpu)
 	if (sched_clock_stable())
 		return sched_clock() + __sched_clock_offset;
 
-	if (unlikely(!sched_clock_running))
-		return 0ull;
+	if (!static_branch_unlikely(&sched_clock_running))
+		return sched_clock();
 
 	preempt_disable_notrace();
 	scd = cpu_sdc(cpu);
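
The change above is what "sched/clock: Enable sched clock early" from the shortlog means on this path: previously sched_clock_cpu() returned 0 until initialization had run, so anything timestamped before that point read 0; now it falls back to the raw sched_clock(), which on x86 can already be TSC-based very early in boot ("x86/tsc: Use TSC as sched clock early").
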
@@ -373,7 +392,7 @@ void sched_clock_tick(void)
 	if (sched_clock_stable())
 		return;
 
-	if (unlikely(!sched_clock_running))
+	if (!static_branch_unlikely(&sched_clock_running))
 		return;
 
 	lockdep_assert_irqs_disabled();
@@ -385,8 +404,6 @@ void sched_clock_tick(void)
 
 void sched_clock_tick_stable(void)
 {
-	u64 gtod, clock;
-
 	if (!sched_clock_stable())
 		return;
 
@@ -398,9 +415,7 @@ void sched_clock_tick_stable(void)
 	 * TSC to be unstable, any computation will be computing crap.
 	 */
 	local_irq_disable();
-	gtod = ktime_get_ns();
-	clock = sched_clock();
-	__gtod_offset = (clock + __sched_clock_offset) - gtod;
+	__sched_clock_gtod_offset();
 	local_irq_enable();
 }
 
@@ -434,9 +449,17 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
 #else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
+void __init sched_clock_init(void)
+{
+	static_branch_inc(&sched_clock_running);
+	local_irq_disable();
+	generic_sched_clock_init();
+	local_irq_enable();
+}
+
 u64 sched_clock_cpu(int cpu)
 {
-	if (unlikely(!sched_clock_running))
+	if (!static_branch_unlikely(&sched_clock_running))
 		return 0;
 
 	return sched_clock();
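
On architectures without CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, sched_clock_init() now flips the same static key and hands over to generic_sched_clock_init() from the generic sched_clock framework (hence the new #include <linux/sched_clock.h> in the first hunk), and does so with interrupts disabled, per "sched/clock: Disable interrupts when calling generic_sched_clock_init()" in the shortlog.
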
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5916,7 +5916,6 @@ void __init sched_init(void)
 	int i, j;
 	unsigned long alloc_size = 0, ptr;
 
-	sched_clock_init();
 	wait_bit_init();
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -622,8 +622,6 @@ void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
 #undef PU
 }
 
-extern __read_mostly int sched_clock_running;
-
 static void print_cpu(struct seq_file *m, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);