Merge branch 'x86-timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 timer updates from Thomas Gleixner:
 "Early TSC based time stamping to allow better boot time analysis.

  This comes with a general cleanup of the TSC calibration code which
  grew warts and duct taping over the years and removes 250 lines of
  code. Initiated and mostly implemented by Pavel with help from
  various folks"

* 'x86-timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (37 commits)
  x86/kvmclock: Mark kvm_get_preset_lpj() as __init
  x86/tsc: Consolidate init code
  sched/clock: Disable interrupts when calling generic_sched_clock_init()
  timekeeping: Prevent false warning when persistent clock is not available
  sched/clock: Close a hole in sched_clock_init()
  x86/tsc: Make use of tsc_calibrate_cpu_early()
  x86/tsc: Split native_calibrate_cpu() into early and late parts
  sched/clock: Use static key for sched_clock_running
  sched/clock: Enable sched clock early
  sched/clock: Move sched clock initialization and merge with generic clock
  x86/tsc: Use TSC as sched clock early
  x86/tsc: Initialize cyc2ns when tsc frequency is determined
  x86/tsc: Calibrate tsc only once
  ARM/time: Remove read_boot_clock64()
  s390/time: Remove read_boot_clock64()
  timekeeping: Default boot time offset to local_clock()
  timekeeping: Replace read_boot_clock64() with read_persistent_wall_and_boot_offset()
  s390/time: Add read_persistent_wall_and_boot_offset()
  x86/xen/time: Output xen sched_clock time from 0
  x86/xen/time: Initialize pv xen time in init_hypervisor_platform()
  ...
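The centerpiece of the series is making the TSC usable as sched_clock long before the full calibration machinery runs: as soon as the frequency is known, the cycles-to-nanoseconds conversion is initialized and later boot messages can carry real timestamps ("x86/tsc: Initialize cyc2ns when tsc frequency is determined", "x86/tsc: Use TSC as sched clock early"). The sketch below illustrates only the underlying mult/shift fixed-point conversion; it is a user-space approximation, the helper names, the shift value and the 3 GHz figure are invented for the example, and the kernel derives its real parameters with clocks_calc_mult_shift() into per-CPU cyc2ns data.

/* cyc2ns_demo.c - user-space sketch of TSC cycles -> nanoseconds.
 * Hypothetical demo, not kernel code. Build: gcc -O2 cyc2ns_demo.c */
#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>			/* __rdtsc() on GCC/Clang */

#define CYC2NS_SHIFT	10		/* fraction bits (assumed value) */

static uint32_t cyc2ns_mul;		/* ns per cycle, scaled by 2^shift */

/* Once the TSC frequency (in kHz) is known, fix the conversion factor
 * so that ns = (cycles * mul) >> shift, avoiding a division per read. */
static void cyc2ns_init(uint64_t tsc_khz)
{
	cyc2ns_mul = (uint32_t)((1000000ULL << CYC2NS_SHIFT) / tsc_khz);
}

static uint64_t cycles_to_ns(uint64_t cycles)
{
	/* 128-bit intermediate so large cycle counts cannot overflow */
	return (uint64_t)(((unsigned __int128)cycles * cyc2ns_mul)
			  >> CYC2NS_SHIFT);
}

int main(void)
{
	uint64_t start, end;

	cyc2ns_init(3000000);	/* pretend a 3 GHz TSC was calibrated */

	start = __rdtsc();
	for (volatile int i = 0; i < 1000000; i++)
		;
	end = __rdtsc();

	printf("~%llu ns elapsed\n",
	       (unsigned long long)cycles_to_ns(end - start));
	return 0;
}

Storing a (mul, shift) pair instead of the raw frequency is what keeps the clock read path to one multiply and one shift; that is the property the cyc2ns commits preserve while moving the setup earlier in boot.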
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -232,8 +232,6 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
 		}
 	}
 
-	set_cpu_cap(c, X86_FEATURE_K7);
-
 	/* calling is from identify_secondary_cpu() ? */
 	if (!c->cpu_index)
 		return;
@@ -617,6 +615,14 @@ static void early_init_amd(struct cpuinfo_x86 *c)
 
 	early_init_amd_mc(c);
 
+#ifdef CONFIG_X86_32
+	if (c->x86 == 6)
+		set_cpu_cap(c, X86_FEATURE_K7);
+#endif
+
+	if (c->x86 >= 0xf)
+		set_cpu_cap(c, X86_FEATURE_K8);
+
 	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
 
 	/*
@@ -863,9 +869,6 @@ static void init_amd(struct cpuinfo_x86 *c)
 
 	init_amd_cacheinfo(c);
 
-	if (c->x86 >= 0xf)
-		set_cpu_cap(c, X86_FEATURE_K8);
-
 	if (cpu_has(c, X86_FEATURE_XMM2)) {
 		unsigned long long val;
 		int ret;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1018,6 +1018,24 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
 }
 
+/*
+ * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
+ * unfortunately, that's not true in practice because of early VIA
+ * chips and (more importantly) broken virtualizers that are not easy
+ * to detect. In the latter case it doesn't even *fail* reliably, so
+ * probing for it doesn't even work. Disable it completely on 32-bit
+ * unless we can find a reliable way to detect all the broken cases.
+ * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
+ */
+static void detect_nopl(void)
+{
+#ifdef CONFIG_X86_32
+	setup_clear_cpu_cap(X86_FEATURE_NOPL);
+#else
+	setup_force_cpu_cap(X86_FEATURE_NOPL);
+#endif
+}
+
 /*
  * Do minimum CPU detection early.
  * Fields really needed: vendor, cpuid_level, family, model, mask,
@@ -1092,6 +1110,8 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	 */
 	if (!pgtable_l5_enabled())
 		setup_clear_cpu_cap(X86_FEATURE_LA57);
+
+	detect_nopl();
 }
 
 void __init early_cpu_init(void)
@@ -1127,24 +1147,6 @@ void __init early_cpu_init(void)
 	early_identify_cpu(&boot_cpu_data);
 }
 
-/*
- * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
- * unfortunately, that's not true in practice because of early VIA
- * chips and (more importantly) broken virtualizers that are not easy
- * to detect. In the latter case it doesn't even *fail* reliably, so
- * probing for it doesn't even work. Disable it completely on 32-bit
- * unless we can find a reliable way to detect all the broken cases.
- * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
- */
-static void detect_nopl(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_X86_32
-	clear_cpu_cap(c, X86_FEATURE_NOPL);
-#else
-	set_cpu_cap(c, X86_FEATURE_NOPL);
-#endif
-}
-
 static void detect_null_seg_behavior(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_64
@@ -1207,8 +1209,6 @@ static void generic_identify(struct cpuinfo_x86 *c)
 
 	get_model_name(c); /* Default name */
 
-	detect_nopl(c);
-
 	detect_null_seg_behavior(c);
 
 	/*
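The common.c hunks above move detect_nopl() in front of the first early_identify_cpu() call and drop its cpuinfo_x86 argument: the decision is now made once on the boot CPU, and the setup_clear_cpu_cap()/setup_force_cpu_cap() variants record it in the global forced/cleared capability masks rather than in per-CPU state. The comment block carried along in both hunks explains why no runtime probe is attempted at all. For illustration only, here is a hypothetical user-space sketch of what such a probe would look like (executing NOPL and catching SIGILL); per the comment, a broken virtualizer may neither execute the instruction properly nor fault, so a clean run of this probe proves nothing, which is exactly why the kernel prefers a compile-time choice.

/* probe_nopl.c - hypothetical demo of NOPL probing via SIGILL.
 * Not kernel code; shows the technique detect_nopl() deliberately avoids. */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf env;

static void sigill_handler(int sig)
{
	(void)sig;
	siglongjmp(env, 1);	/* jump back past the faulting instruction */
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_handler = sigill_handler;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGILL, &sa, NULL);

	if (sigsetjmp(env, 1) == 0) {
		/* NOPL: the multi-byte NOP "nopl (%eax)", opcode 0f 1f /0 */
		__asm__ volatile(".byte 0x0f, 0x1f, 0x00");
		puts("NOPL executed (or a virtualizer silently swallowed it)");
	} else {
		puts("SIGILL: NOPL unsupported here");
	}
	return 0;
}

On any recent x86-64 CPU this prints the first message; the SIGILL path would fire on the early VIA chips the comment mentions, while the untrustworthy middle case (no fault, wrong behavior) is invisible to the probe.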